Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- env-llmeval/lib/python3.10/site-packages/evaluate-0.4.1.dist-info/RECORD +64 -0
- env-llmeval/lib/python3.10/site-packages/evaluate-0.4.1.dist-info/WHEEL +5 -0
- env-llmeval/lib/python3.10/site-packages/evaluate-0.4.1.dist-info/top_level.txt +1 -0
- env-llmeval/lib/python3.10/site-packages/frozenlist/__init__.py +95 -0
- env-llmeval/lib/python3.10/site-packages/frozenlist/__init__.pyi +47 -0
- env-llmeval/lib/python3.10/site-packages/frozenlist/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/frozenlist/_frozenlist.pyx +123 -0
- env-llmeval/lib/python3.10/site-packages/frozenlist/py.typed +1 -0
- env-llmeval/lib/python3.10/site-packages/python_dateutil-2.9.0.post0.dist-info/INSTALLER +1 -0
- env-llmeval/lib/python3.10/site-packages/torch/_C/_VariableFunctions.pyi +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_C/_autograd.pyi +123 -0
- env-llmeval/lib/python3.10/site-packages/torch/_C/_cpu.pyi +5 -0
- env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_autograd.pyi +26 -0
- env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_c10d.pyi +478 -0
- env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_rpc_testing.pyi +35 -0
- env-llmeval/lib/python3.10/site-packages/torch/_C/_functorch.pyi +71 -0
- env-llmeval/lib/python3.10/site-packages/torch/_C/_itt.pyi +5 -0
- env-llmeval/lib/python3.10/site-packages/torch/_C/_lazy.pyi +28 -0
- env-llmeval/lib/python3.10/site-packages/torch/_C/_lazy_ts_backend.pyi +11 -0
- env-llmeval/lib/python3.10/site-packages/torch/_C/_monitor.pyi +44 -0
- env-llmeval/lib/python3.10/site-packages/torch/_C/_profiler.pyi +238 -0
- env-llmeval/lib/python3.10/site-packages/torch/_refs/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_refs/__pycache__/_conversions.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_refs/__pycache__/fft.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_refs/_conversions.py +118 -0
- env-llmeval/lib/python3.10/site-packages/torch/_refs/fft.py +590 -0
- env-llmeval/lib/python3.10/site-packages/torch/_refs/linalg/__init__.py +276 -0
- env-llmeval/lib/python3.10/site-packages/torch/_refs/linalg/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/__init__.py +3 -0
- env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/functional/__init__.py +1174 -0
- env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/functional/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_refs/special/__init__.py +236 -0
- env-llmeval/lib/python3.10/site-packages/torch/_refs/special/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_tensor.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/functional_tensor.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/meta_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/schema_check_mode.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_subclasses/fake_utils.py +188 -0
- env-llmeval/lib/python3.10/site-packages/torch/_subclasses/functional_tensor.py +552 -0
- env-llmeval/lib/python3.10/site-packages/torch/_subclasses/meta_utils.py +730 -0
- env-llmeval/lib/python3.10/site-packages/torch/_subclasses/schema_check_mode.py +196 -0
- env-llmeval/lib/python3.10/site-packages/torch/backends/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/backends/_coreml/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/preprocess.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/evaluate-0.4.1.dist-info/RECORD
ADDED
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
../../../bin/evaluate-cli,sha256=ebVljaZgWxr1sBQVsnDdhaYD2rXNcaw-u7JiBl8yibA,255
|
2 |
+
evaluate-0.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
3 |
+
evaluate-0.4.1.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
|
4 |
+
evaluate-0.4.1.dist-info/METADATA,sha256=gyoxlsBnA-d8Kb9Bj8RKeRRdIp8gH4ILqfUeBdj4va8,9412
|
5 |
+
evaluate-0.4.1.dist-info/RECORD,,
|
6 |
+
evaluate-0.4.1.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
|
7 |
+
evaluate-0.4.1.dist-info/entry_points.txt,sha256=m2P3heof0lsg47nq6tYW_yUtxTfimd3RuD26Yk8KMkM,70
|
8 |
+
evaluate-0.4.1.dist-info/top_level.txt,sha256=wBEoxird-u8p4OKDwq5z9rlfH-ybeez8rjaKNLNJ3B0,9
|
9 |
+
evaluate/__init__.py,sha256=UNd1S0HL23X2WHwt00PRuBJG3ESebsSvJQTYqunzZYk,1754
|
10 |
+
evaluate/__pycache__/__init__.cpython-310.pyc,,
|
11 |
+
evaluate/__pycache__/config.cpython-310.pyc,,
|
12 |
+
evaluate/__pycache__/hub.cpython-310.pyc,,
|
13 |
+
evaluate/__pycache__/info.cpython-310.pyc,,
|
14 |
+
evaluate/__pycache__/inspect.cpython-310.pyc,,
|
15 |
+
evaluate/__pycache__/loading.cpython-310.pyc,,
|
16 |
+
evaluate/__pycache__/module.cpython-310.pyc,,
|
17 |
+
evaluate/__pycache__/naming.cpython-310.pyc,,
|
18 |
+
evaluate/__pycache__/saving.cpython-310.pyc,,
|
19 |
+
evaluate/__pycache__/visualization.cpython-310.pyc,,
|
20 |
+
evaluate/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
21 |
+
evaluate/commands/__pycache__/__init__.cpython-310.pyc,,
|
22 |
+
evaluate/commands/__pycache__/evaluate_cli.cpython-310.pyc,,
|
23 |
+
evaluate/commands/evaluate_cli.py,sha256=w7GWb48JPjoC0BX7Jn12qtxQUBYOlZNhdg4YegA93Fw,4491
|
24 |
+
evaluate/config.py,sha256=g4g-S6hVAw0Ys9As7gKaFP66pZeh8hoJJ5GEXaLSWV8,6648
|
25 |
+
evaluate/evaluation_suite/__init__.py,sha256=TjcFihBDf_ZQAoIjSXPEC0iFBeEC_LFqCfXKbrkyhWs,4941
|
26 |
+
evaluate/evaluation_suite/__pycache__/__init__.cpython-310.pyc,,
|
27 |
+
evaluate/evaluator/__init__.py,sha256=JoWqRP-qCgNzDre6nO8zpJ2Iyp0eUkN7eDKPOPUXz2g,5788
|
28 |
+
evaluate/evaluator/__pycache__/__init__.cpython-310.pyc,,
|
29 |
+
evaluate/evaluator/__pycache__/audio_classification.cpython-310.pyc,,
|
30 |
+
evaluate/evaluator/__pycache__/automatic_speech_recognition.cpython-310.pyc,,
|
31 |
+
evaluate/evaluator/__pycache__/base.cpython-310.pyc,,
|
32 |
+
evaluate/evaluator/__pycache__/image_classification.cpython-310.pyc,,
|
33 |
+
evaluate/evaluator/__pycache__/question_answering.cpython-310.pyc,,
|
34 |
+
evaluate/evaluator/__pycache__/text2text_generation.cpython-310.pyc,,
|
35 |
+
evaluate/evaluator/__pycache__/text_classification.cpython-310.pyc,,
|
36 |
+
evaluate/evaluator/__pycache__/text_generation.cpython-310.pyc,,
|
37 |
+
evaluate/evaluator/__pycache__/token_classification.cpython-310.pyc,,
|
38 |
+
evaluate/evaluator/__pycache__/utils.cpython-310.pyc,,
|
39 |
+
evaluate/evaluator/audio_classification.py,sha256=v5myOnm0PN8BWVnm4nWCzcyklaLtdnbOS3EJ09TPFhg,5804
|
40 |
+
evaluate/evaluator/automatic_speech_recognition.py,sha256=jOveYJXsH-t5SzGe7FzXhnHeDKFhqWZUtK3S1l9XYus,4392
|
41 |
+
evaluate/evaluator/base.py,sha256=TkkPa6jJWQfNgIK_FVEF24VUfuBn5aZ7Wo1hAfvJEhA,22881
|
42 |
+
evaluate/evaluator/image_classification.py,sha256=RJ7NUS91hjZkr5JqhqtYsr5dxBkChA3Qim6An8fHT50,4751
|
43 |
+
evaluate/evaluator/question_answering.py,sha256=ArF5BKfE9J9uC-q1GQwbvkAHw1ThgA997ERKmPS-Z4g,9566
|
44 |
+
evaluate/evaluator/text2text_generation.py,sha256=M2itKYfIz9z_9J-Y7sXyx4HKMhQbdYwbv8oThSw8Yzw,9676
|
45 |
+
evaluate/evaluator/text_classification.py,sha256=g1MUwa3TCUCUBGvZDmdeJ_l8BAOgbn0Q0y4TDvep8Uk,6676
|
46 |
+
evaluate/evaluator/text_generation.py,sha256=4ZnHweTUpvNZhaprewTPms__00I8Tnje586ZDCG_ZlU,2679
|
47 |
+
evaluate/evaluator/token_classification.py,sha256=XMzteW1coN2e3KWmpWj-OGafj22pzMa7UiHylooirHk,11546
|
48 |
+
evaluate/evaluator/utils.py,sha256=HDKdLWLHtfpP-Hhe9cf1TFVIRsmfNgLHifDcGYujKZs,2451
|
49 |
+
evaluate/hub.py,sha256=ZX6VYZU0EkjTWmABuJ6Zg6oHXIT2dHkHy0u8RgyL9UQ,4550
|
50 |
+
evaluate/info.py,sha256=l5gXfqHhj77-XvFhz57Mns-Ev-lNJsLxsyYPHPvSzj0,5490
|
51 |
+
evaluate/inspect.py,sha256=vVSCLr7HWLxIpXzwpDPuiE5XwiP5QQ82oGkdok7aO7o,4969
|
52 |
+
evaluate/loading.py,sha256=IdxAMbbjyAID8NFLDuOjU0WK5Vw_Ep4HoziYeu1ySMI,35228
|
53 |
+
evaluate/module.py,sha256=Va2FrSJnTXr6P5bspjp3SXgnvdvPm6yEcAasaTX9LJU,46290
|
54 |
+
evaluate/naming.py,sha256=Lpw8JmoJfiWs4xDUMEDzcIKO9Nw9RS2lzjeuUP-9acA,2827
|
55 |
+
evaluate/saving.py,sha256=UoixNIHmWEceJREvGZlJNViVjRkgNf3MRflwnnhnNUA,2159
|
56 |
+
evaluate/utils/__init__.py,sha256=kdFi2pVFSXm_y4EvvuQNnlPUkOPmGLNtc9YTfxAmdsI,1201
|
57 |
+
evaluate/utils/__pycache__/__init__.cpython-310.pyc,,
|
58 |
+
evaluate/utils/__pycache__/file_utils.cpython-310.pyc,,
|
59 |
+
evaluate/utils/__pycache__/gradio.cpython-310.pyc,,
|
60 |
+
evaluate/utils/__pycache__/logging.cpython-310.pyc,,
|
61 |
+
evaluate/utils/file_utils.py,sha256=uGkXJYWQBKNALhdxktpQ_844jCjKLFQg6l_3KKK2zGI,22602
|
62 |
+
evaluate/utils/gradio.py,sha256=UXGRxiPsJ41Xm5gGF7Jf_1vTOPopE_wDoBIyBS0S8d4,4434
|
63 |
+
evaluate/utils/logging.py,sha256=nRy963i3_-H0Qcer6ETgnTFiJoQhojSiapeXQ9-eUyk,6698
|
64 |
+
evaluate/visualization.py,sha256=m-mD6vxOIQ-_KXTues2tB4r7c4jdygBybHJeidP-jgw,9293
|
env-llmeval/lib/python3.10/site-packages/evaluate-0.4.1.dist-info/WHEEL
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Wheel-Version: 1.0
|
2 |
+
Generator: bdist_wheel (0.41.2)
|
3 |
+
Root-Is-Purelib: true
|
4 |
+
Tag: py3-none-any
|
5 |
+
|
env-llmeval/lib/python3.10/site-packages/evaluate-0.4.1.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
evaluate
|
env-llmeval/lib/python3.10/site-packages/frozenlist/__init__.py
ADDED
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import sys
|
3 |
+
import types
|
4 |
+
from collections.abc import MutableSequence
|
5 |
+
from functools import total_ordering
|
6 |
+
from typing import Type
|
7 |
+
|
8 |
+
__version__ = "1.4.1"
|
9 |
+
|
10 |
+
__all__ = ("FrozenList", "PyFrozenList") # type: Tuple[str, ...]
|
11 |
+
|
12 |
+
|
13 |
+
NO_EXTENSIONS = bool(os.environ.get("FROZENLIST_NO_EXTENSIONS")) # type: bool
|
14 |
+
|
15 |
+
|
16 |
+
@total_ordering
|
17 |
+
class FrozenList(MutableSequence):
|
18 |
+
__slots__ = ("_frozen", "_items")
|
19 |
+
|
20 |
+
if sys.version_info >= (3, 9):
|
21 |
+
__class_getitem__ = classmethod(types.GenericAlias)
|
22 |
+
else:
|
23 |
+
|
24 |
+
@classmethod
|
25 |
+
def __class_getitem__(cls: Type["FrozenList"]) -> Type["FrozenList"]:
|
26 |
+
return cls
|
27 |
+
|
28 |
+
def __init__(self, items=None):
|
29 |
+
self._frozen = False
|
30 |
+
if items is not None:
|
31 |
+
items = list(items)
|
32 |
+
else:
|
33 |
+
items = []
|
34 |
+
self._items = items
|
35 |
+
|
36 |
+
@property
|
37 |
+
def frozen(self):
|
38 |
+
return self._frozen
|
39 |
+
|
40 |
+
def freeze(self):
|
41 |
+
self._frozen = True
|
42 |
+
|
43 |
+
def __getitem__(self, index):
|
44 |
+
return self._items[index]
|
45 |
+
|
46 |
+
def __setitem__(self, index, value):
|
47 |
+
if self._frozen:
|
48 |
+
raise RuntimeError("Cannot modify frozen list.")
|
49 |
+
self._items[index] = value
|
50 |
+
|
51 |
+
def __delitem__(self, index):
|
52 |
+
if self._frozen:
|
53 |
+
raise RuntimeError("Cannot modify frozen list.")
|
54 |
+
del self._items[index]
|
55 |
+
|
56 |
+
def __len__(self):
|
57 |
+
return self._items.__len__()
|
58 |
+
|
59 |
+
def __iter__(self):
|
60 |
+
return self._items.__iter__()
|
61 |
+
|
62 |
+
def __reversed__(self):
|
63 |
+
return self._items.__reversed__()
|
64 |
+
|
65 |
+
def __eq__(self, other):
|
66 |
+
return list(self) == other
|
67 |
+
|
68 |
+
def __le__(self, other):
|
69 |
+
return list(self) <= other
|
70 |
+
|
71 |
+
def insert(self, pos, item):
|
72 |
+
if self._frozen:
|
73 |
+
raise RuntimeError("Cannot modify frozen list.")
|
74 |
+
self._items.insert(pos, item)
|
75 |
+
|
76 |
+
def __repr__(self):
|
77 |
+
return f"<FrozenList(frozen={self._frozen}, {self._items!r})>"
|
78 |
+
|
79 |
+
def __hash__(self):
|
80 |
+
if self._frozen:
|
81 |
+
return hash(tuple(self))
|
82 |
+
else:
|
83 |
+
raise RuntimeError("Cannot hash unfrozen list.")
|
84 |
+
|
85 |
+
|
86 |
+
PyFrozenList = FrozenList
|
87 |
+
|
88 |
+
|
89 |
+
if not NO_EXTENSIONS:
|
90 |
+
try:
|
91 |
+
from ._frozenlist import FrozenList as CFrozenList # type: ignore
|
92 |
+
except ImportError: # pragma: no cover
|
93 |
+
pass
|
94 |
+
else:
|
95 |
+
FrozenList = CFrozenList # type: ignore
|
env-llmeval/lib/python3.10/site-packages/frozenlist/__init__.pyi
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import (
|
2 |
+
Generic,
|
3 |
+
Iterable,
|
4 |
+
Iterator,
|
5 |
+
List,
|
6 |
+
MutableSequence,
|
7 |
+
Optional,
|
8 |
+
TypeVar,
|
9 |
+
Union,
|
10 |
+
overload,
|
11 |
+
)
|
12 |
+
|
13 |
+
_T = TypeVar("_T")
|
14 |
+
_Arg = Union[List[_T], Iterable[_T]]
|
15 |
+
|
16 |
+
class FrozenList(MutableSequence[_T], Generic[_T]):
|
17 |
+
def __init__(self, items: Optional[_Arg[_T]] = None) -> None: ...
|
18 |
+
@property
|
19 |
+
def frozen(self) -> bool: ...
|
20 |
+
def freeze(self) -> None: ...
|
21 |
+
@overload
|
22 |
+
def __getitem__(self, i: int) -> _T: ...
|
23 |
+
@overload
|
24 |
+
def __getitem__(self, s: slice) -> FrozenList[_T]: ...
|
25 |
+
@overload
|
26 |
+
def __setitem__(self, i: int, o: _T) -> None: ...
|
27 |
+
@overload
|
28 |
+
def __setitem__(self, s: slice, o: Iterable[_T]) -> None: ...
|
29 |
+
@overload
|
30 |
+
def __delitem__(self, i: int) -> None: ...
|
31 |
+
@overload
|
32 |
+
def __delitem__(self, i: slice) -> None: ...
|
33 |
+
def __len__(self) -> int: ...
|
34 |
+
def __iter__(self) -> Iterator[_T]: ...
|
35 |
+
def __reversed__(self) -> Iterator[_T]: ...
|
36 |
+
def __eq__(self, other: object) -> bool: ...
|
37 |
+
def __le__(self, other: FrozenList[_T]) -> bool: ...
|
38 |
+
def __ne__(self, other: object) -> bool: ...
|
39 |
+
def __lt__(self, other: FrozenList[_T]) -> bool: ...
|
40 |
+
def __ge__(self, other: FrozenList[_T]) -> bool: ...
|
41 |
+
def __gt__(self, other: FrozenList[_T]) -> bool: ...
|
42 |
+
def insert(self, pos: int, item: _T) -> None: ...
|
43 |
+
def __repr__(self) -> str: ...
|
44 |
+
def __hash__(self) -> int: ...
|
45 |
+
|
46 |
+
# types for C accelerators are the same
|
47 |
+
CFrozenList = PyFrozenList = FrozenList
|
env-llmeval/lib/python3.10/site-packages/frozenlist/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (3.24 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (766 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/frozenlist/_frozenlist.pyx
ADDED
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys
|
2 |
+
import types
|
3 |
+
from collections.abc import MutableSequence
|
4 |
+
|
5 |
+
|
6 |
+
cdef class FrozenList:
|
7 |
+
|
8 |
+
if sys.version_info >= (3, 9):
|
9 |
+
__class_getitem__ = classmethod(types.GenericAlias)
|
10 |
+
else:
|
11 |
+
@classmethod
|
12 |
+
def __class_getitem__(cls):
|
13 |
+
return cls
|
14 |
+
|
15 |
+
cdef readonly bint frozen
|
16 |
+
cdef list _items
|
17 |
+
|
18 |
+
def __init__(self, items=None):
|
19 |
+
self.frozen = False
|
20 |
+
if items is not None:
|
21 |
+
items = list(items)
|
22 |
+
else:
|
23 |
+
items = []
|
24 |
+
self._items = items
|
25 |
+
|
26 |
+
cdef object _check_frozen(self):
|
27 |
+
if self.frozen:
|
28 |
+
raise RuntimeError("Cannot modify frozen list.")
|
29 |
+
|
30 |
+
cdef inline object _fast_len(self):
|
31 |
+
return len(self._items)
|
32 |
+
|
33 |
+
def freeze(self):
|
34 |
+
self.frozen = True
|
35 |
+
|
36 |
+
def __getitem__(self, index):
|
37 |
+
return self._items[index]
|
38 |
+
|
39 |
+
def __setitem__(self, index, value):
|
40 |
+
self._check_frozen()
|
41 |
+
self._items[index] = value
|
42 |
+
|
43 |
+
def __delitem__(self, index):
|
44 |
+
self._check_frozen()
|
45 |
+
del self._items[index]
|
46 |
+
|
47 |
+
def __len__(self):
|
48 |
+
return self._fast_len()
|
49 |
+
|
50 |
+
def __iter__(self):
|
51 |
+
return self._items.__iter__()
|
52 |
+
|
53 |
+
def __reversed__(self):
|
54 |
+
return self._items.__reversed__()
|
55 |
+
|
56 |
+
def __richcmp__(self, other, op):
|
57 |
+
if op == 0: # <
|
58 |
+
return list(self) < other
|
59 |
+
if op == 1: # <=
|
60 |
+
return list(self) <= other
|
61 |
+
if op == 2: # ==
|
62 |
+
return list(self) == other
|
63 |
+
if op == 3: # !=
|
64 |
+
return list(self) != other
|
65 |
+
if op == 4: # >
|
66 |
+
return list(self) > other
|
67 |
+
if op == 5: # =>
|
68 |
+
return list(self) >= other
|
69 |
+
|
70 |
+
def insert(self, pos, item):
|
71 |
+
self._check_frozen()
|
72 |
+
self._items.insert(pos, item)
|
73 |
+
|
74 |
+
def __contains__(self, item):
|
75 |
+
return item in self._items
|
76 |
+
|
77 |
+
def __iadd__(self, items):
|
78 |
+
self._check_frozen()
|
79 |
+
self._items += list(items)
|
80 |
+
return self
|
81 |
+
|
82 |
+
def index(self, item):
|
83 |
+
return self._items.index(item)
|
84 |
+
|
85 |
+
def remove(self, item):
|
86 |
+
self._check_frozen()
|
87 |
+
self._items.remove(item)
|
88 |
+
|
89 |
+
def clear(self):
|
90 |
+
self._check_frozen()
|
91 |
+
self._items.clear()
|
92 |
+
|
93 |
+
def extend(self, items):
|
94 |
+
self._check_frozen()
|
95 |
+
self._items += list(items)
|
96 |
+
|
97 |
+
def reverse(self):
|
98 |
+
self._check_frozen()
|
99 |
+
self._items.reverse()
|
100 |
+
|
101 |
+
def pop(self, index=-1):
|
102 |
+
self._check_frozen()
|
103 |
+
return self._items.pop(index)
|
104 |
+
|
105 |
+
def append(self, item):
|
106 |
+
self._check_frozen()
|
107 |
+
return self._items.append(item)
|
108 |
+
|
109 |
+
def count(self, item):
|
110 |
+
return self._items.count(item)
|
111 |
+
|
112 |
+
def __repr__(self):
|
113 |
+
return '<FrozenList(frozen={}, {!r})>'.format(self.frozen,
|
114 |
+
self._items)
|
115 |
+
|
116 |
+
def __hash__(self):
|
117 |
+
if self.frozen:
|
118 |
+
return hash(tuple(self._items))
|
119 |
+
else:
|
120 |
+
raise RuntimeError("Cannot hash unfrozen list.")
|
121 |
+
|
122 |
+
|
123 |
+
MutableSequence.register(FrozenList)
|
env-llmeval/lib/python3.10/site-packages/frozenlist/py.typed
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
Marker
|
env-llmeval/lib/python3.10/site-packages/python_dateutil-2.9.0.post0.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
pip
|
env-llmeval/lib/python3.10/site-packages/torch/_C/_VariableFunctions.pyi
ADDED
The diff for this file is too large to render.
See raw diff
|
|
env-llmeval/lib/python3.10/site-packages/torch/_C/_autograd.pyi
ADDED
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from enum import Enum
|
2 |
+
from typing import Any, Callable, List, Optional, Set
|
3 |
+
|
4 |
+
import torch
|
5 |
+
|
6 |
+
from ._profiler import (
|
7 |
+
_ProfilerEvent,
|
8 |
+
ActiveProfilerType,
|
9 |
+
ProfilerActivity,
|
10 |
+
ProfilerConfig,
|
11 |
+
)
|
12 |
+
|
13 |
+
# Defined in tools/autograd/init.cpp
|
14 |
+
|
15 |
+
class DeviceType(Enum):
|
16 |
+
CPU = ...
|
17 |
+
CUDA = ...
|
18 |
+
MKLDNN = ...
|
19 |
+
OPENGL = ...
|
20 |
+
OPENCL = ...
|
21 |
+
IDEEP = ...
|
22 |
+
HIP = ...
|
23 |
+
FPGA = ...
|
24 |
+
ORT = ...
|
25 |
+
XLA = ...
|
26 |
+
MPS = ...
|
27 |
+
HPU = ...
|
28 |
+
Meta = ...
|
29 |
+
Vulkan = ...
|
30 |
+
Metal = ...
|
31 |
+
PrivateUse1 = ...
|
32 |
+
|
33 |
+
class ProfilerEvent:
|
34 |
+
def cpu_elapsed_us(self, other: ProfilerEvent) -> float: ...
|
35 |
+
def cpu_memory_usage(self) -> int: ...
|
36 |
+
def cuda_elapsed_us(self, other: ProfilerEvent) -> float: ...
|
37 |
+
def privateuse1_elapsed_us(self, other: ProfilerEvent) -> float: ...
|
38 |
+
def cuda_memory_usage(self) -> int: ...
|
39 |
+
def device(self) -> int: ...
|
40 |
+
def handle(self) -> int: ...
|
41 |
+
def has_cuda(self) -> bool: ...
|
42 |
+
def is_remote(self) -> bool: ...
|
43 |
+
def kind(self) -> int: ...
|
44 |
+
def name(self) -> str: ...
|
45 |
+
def node_id(self) -> int: ...
|
46 |
+
def sequence_nr(self) -> int: ...
|
47 |
+
def shapes(self) -> List[List[int]]: ...
|
48 |
+
def thread_id(self) -> int: ...
|
49 |
+
def flops(self) -> float: ...
|
50 |
+
def is_async(self) -> bool: ...
|
51 |
+
|
52 |
+
class _KinetoEvent:
|
53 |
+
def name(self) -> str: ...
|
54 |
+
def device_index(self) -> int: ...
|
55 |
+
def start_us(self) -> int: ...
|
56 |
+
def duration_us(self) -> int: ...
|
57 |
+
def is_async(self) -> bool: ...
|
58 |
+
def linked_correlation_id(self) -> int: ...
|
59 |
+
def shapes(self) -> List[List[int]]: ...
|
60 |
+
def dtypes(self) -> List[str]: ...
|
61 |
+
def concrete_inputs(self) -> List[Any]: ...
|
62 |
+
def device_type(self) -> DeviceType: ...
|
63 |
+
def start_thread_id(self) -> int: ...
|
64 |
+
def end_thread_id(self) -> int: ...
|
65 |
+
def correlation_id(self) -> int: ...
|
66 |
+
def fwd_thread_id(self) -> int: ...
|
67 |
+
def stack(self) -> List[str]: ...
|
68 |
+
def scope(self) -> int: ...
|
69 |
+
def sequence_nr(self) -> int: ...
|
70 |
+
def flops(self) -> int: ...
|
71 |
+
def cuda_elapsed_us(self) -> int: ...
|
72 |
+
def privateuse1_elapsed_us(self) -> int: ...
|
73 |
+
|
74 |
+
class _ProfilerResult:
|
75 |
+
def events(self) -> List[_KinetoEvent]: ...
|
76 |
+
def legacy_events(self) -> List[List[ProfilerEvent]]: ...
|
77 |
+
def save(self, path: str) -> None: ...
|
78 |
+
def experimental_event_tree(self) -> List[_ProfilerEvent]: ...
|
79 |
+
def trace_start_us(self) -> int: ...
|
80 |
+
|
81 |
+
class SavedTensor: ...
|
82 |
+
|
83 |
+
def _enable_profiler(
|
84 |
+
config: ProfilerConfig,
|
85 |
+
activities: Set[ProfilerActivity],
|
86 |
+
) -> None: ...
|
87 |
+
def _prepare_profiler(
|
88 |
+
config: ProfilerConfig,
|
89 |
+
activities: Set[ProfilerActivity],
|
90 |
+
) -> None: ...
|
91 |
+
def _disable_profiler() -> _ProfilerResult: ...
|
92 |
+
def _profiler_enabled() -> bool: ...
|
93 |
+
def _add_metadata_json(key: str, value: str) -> None: ...
|
94 |
+
def _kineto_step() -> None: ...
|
95 |
+
def _get_sequence_nr() -> int: ...
|
96 |
+
def kineto_available() -> bool: ...
|
97 |
+
def _record_function_with_args_enter(name: str, *args) -> torch.Tensor: ...
|
98 |
+
def _record_function_with_args_exit(handle: torch.Tensor) -> None: ...
|
99 |
+
def _supported_activities() -> Set[ProfilerActivity]: ...
|
100 |
+
def _enable_record_function(enable: bool) -> None: ...
|
101 |
+
def _set_empty_test_observer(is_global: bool, sampling_prob: float) -> None: ...
|
102 |
+
def _push_saved_tensors_default_hooks(
|
103 |
+
pack_hook: Callable[[torch.Tensor], Any],
|
104 |
+
unpack_hook: Callable[[Any], torch.Tensor],
|
105 |
+
) -> None: ...
|
106 |
+
def _pop_saved_tensors_default_hooks() -> None: ...
|
107 |
+
def _unsafe_set_version_counter(t: torch.Tensor, prev_version: int) -> None: ...
|
108 |
+
def _enable_profiler_legacy(config: ProfilerConfig) -> None: ...
|
109 |
+
def _disable_profiler_legacy() -> List[List[ProfilerEvent]]: ...
|
110 |
+
def _profiler_type() -> ActiveProfilerType: ...
|
111 |
+
def _saved_tensors_hooks_enable() -> None: ...
|
112 |
+
def _saved_tensors_hooks_disable(message: str) -> None: ...
|
113 |
+
def _saved_tensors_hooks_get_disabled_error_message() -> Optional[str]: ...
|
114 |
+
|
115 |
+
class CreationMeta(Enum):
|
116 |
+
DEFAULT = ...
|
117 |
+
IN_CUSTOM_FUNCTION = ...
|
118 |
+
MULTI_OUTPUT_NODE = ...
|
119 |
+
NO_GRAD_MODE = ...
|
120 |
+
INFERENCE_MODE = ...
|
121 |
+
|
122 |
+
def _set_creation_meta(t: torch.Tensor, creation_meta: CreationMeta) -> None: ...
|
123 |
+
def _get_creation_meta(t: torch.Tensor) -> CreationMeta: ...
|
env-llmeval/lib/python3.10/site-packages/torch/_C/_cpu.pyi
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from torch.types import _bool
|
2 |
+
|
3 |
+
# Defined in torch/csrc/cpu/Module.cpp
|
4 |
+
|
5 |
+
def _is_cpu_support_vnni() -> _bool: ...
|
env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_autograd.pyi
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Any, Dict, List, Set
|
2 |
+
|
3 |
+
import torch
|
4 |
+
|
5 |
+
# This module is defined in torch/csrc/distributed/autograd/init.cpp
|
6 |
+
|
7 |
+
class DistAutogradContext:
|
8 |
+
def _context_id(self) -> int: ...
|
9 |
+
def _recv_functions(self) -> Dict[int, Any]: ...
|
10 |
+
def _send_functions(self) -> Dict[int, Any]: ...
|
11 |
+
def _known_worker_ids(self) -> Set[int]: ...
|
12 |
+
|
13 |
+
def _new_context() -> DistAutogradContext: ...
|
14 |
+
def _release_context(context_id: int) -> None: ...
|
15 |
+
def _get_max_id() -> int: ...
|
16 |
+
def _is_valid_context(worker_id: int) -> bool: ...
|
17 |
+
def _retrieve_context(context_id: int) -> DistAutogradContext: ...
|
18 |
+
def _current_context() -> DistAutogradContext: ...
|
19 |
+
def _init(worker_id: int) -> None: ...
|
20 |
+
def _get_debug_info() -> Dict[str, str]: ...
|
21 |
+
def backward(
|
22 |
+
context_id: int,
|
23 |
+
roots: List[torch.Tensor],
|
24 |
+
retain_graph=False,
|
25 |
+
) -> None: ...
|
26 |
+
def get_gradients(context_id: int) -> Dict[torch.Tensor, torch.Tensor]: ...
|
env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_c10d.pyi
ADDED
@@ -0,0 +1,478 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# mypy: disable-error-code="type-arg"
|
2 |
+
from datetime import timedelta
|
3 |
+
from enum import Enum
|
4 |
+
from typing import Any, Dict, List, Optional, overload, Tuple, Union
|
5 |
+
|
6 |
+
from torch import Tensor
|
7 |
+
from torch._C import ScriptObject
|
8 |
+
from torch.futures import Future
|
9 |
+
|
10 |
+
# This module is defined in torch/csrc/distributed/c10d/init.cpp
|
11 |
+
|
12 |
+
_DEFAULT_FIRST_BUCKET_BYTES: int
|
13 |
+
_DEFAULT_NO_TIMEOUT: timedelta
|
14 |
+
_DEFAULT_PG_TIMEOUT: timedelta
|
15 |
+
_DEFAULT_PG_NCCL_TIMEOUT: timedelta
|
16 |
+
|
17 |
+
class BuiltinCommHookType(Enum):
|
18 |
+
ALLREDUCE = ...
|
19 |
+
FP16_COMPRESS = ...
|
20 |
+
|
21 |
+
def _register_comm_hook(reducer: Reducer, state: Any, comm_hook: Any): ...
|
22 |
+
def _register_builtin_comm_hook(
|
23 |
+
reducer: Reducer,
|
24 |
+
comm_hook_type: BuiltinCommHookType,
|
25 |
+
): ...
|
26 |
+
|
27 |
+
class GradBucket:
|
28 |
+
def index(self) -> int: ...
|
29 |
+
def buffer(self) -> Tensor: ...
|
30 |
+
def gradients(self) -> List[Tensor]: ...
|
31 |
+
def is_last(self) -> bool: ...
|
32 |
+
def set_buffer(self, tensor: Tensor) -> None: ...
|
33 |
+
def parameters(self) -> List[Tensor]: ...
|
34 |
+
|
35 |
+
class Reducer:
|
36 |
+
def __init__(
|
37 |
+
self,
|
38 |
+
params: List[Tensor],
|
39 |
+
bucket_indices: List[List[int]],
|
40 |
+
per_bucket_size_limits: List[int],
|
41 |
+
process_group: ProcessGroup,
|
42 |
+
expect_sparse_gradients: List[bool] = ...,
|
43 |
+
bucket_bytes_cap: int = ..., # kDefaultBucketBytesCap in reducer.hpp
|
44 |
+
find_unused_parameters: bool = ...,
|
45 |
+
gradient_as_bucket_view: bool = ...,
|
46 |
+
param_to_name_mapping: Dict[int, str] = ...,
|
47 |
+
first_bucket_types_cap: int = ..., # kDefaultFirstBucketBytes in reducer.hpp
|
48 |
+
): ...
|
49 |
+
def prepare_for_forward(self) -> None: ...
|
50 |
+
def prepare_for_backward(self, output: List[Tensor]) -> None: ...
|
51 |
+
def get_backward_stats(self) -> List[int]: ...
|
52 |
+
def _install_post_backward_futures(self, futures: List[Future]) -> None: ...
|
53 |
+
def _rebuild_buckets(self) -> bool: ...
|
54 |
+
def _get_zeros_like_grad_buckets(self) -> List[GradBucket]: ...
|
55 |
+
def _push_all_rebuilt_params(self) -> None: ...
|
56 |
+
def _set_forward_pass_work_handle(
|
57 |
+
self,
|
58 |
+
work: Work,
|
59 |
+
use_static_world_size: bool,
|
60 |
+
): ...
|
61 |
+
def _get_local_used_map(self) -> Tensor: ...
|
62 |
+
def _set_ddp_runtime_logging_sample_rate(self, sample_rate: int) -> None: ...
|
63 |
+
def _set_static_graph(self) -> None: ...
|
64 |
+
def _run_comm_hook(self, bucket: GradBucket) -> Future: ...
|
65 |
+
def set_logger(self, logger: Logger) -> None: ...
|
66 |
+
def _remove_autograd_hooks(self) -> None: ...
|
67 |
+
def _check_reducer_finalized(self) -> None: ...
|
68 |
+
def _set_sparse_metadata(self, global_unique_ids: Dict[str, Tensor]) -> None: ...
|
69 |
+
def _reset_state(self) -> None: ...
|
70 |
+
def _update_process_group(self, new_process_group: ProcessGroup) -> None: ...
|
71 |
+
|
72 |
+
class DDPLoggingData:
|
73 |
+
strs_map: Dict[str, str]
|
74 |
+
ints_map: Dict[str, int]
|
75 |
+
|
76 |
+
class Logger:
|
77 |
+
def __init__(self, reducer: Reducer): ...
|
78 |
+
def set_construction_data_and_log(
|
79 |
+
self,
|
80 |
+
module_name: str,
|
81 |
+
device_ids: List[int],
|
82 |
+
output_device: int,
|
83 |
+
broadcast_buffers: bool,
|
84 |
+
has_sync_bn: bool,
|
85 |
+
static_graph: bool,
|
86 |
+
): ...
|
87 |
+
def set_runtime_stats_and_log(self) -> None: ...
|
88 |
+
def set_error_and_log(self, error: str) -> None: ...
|
89 |
+
def _get_ddp_logging_data(self) -> DDPLoggingData: ...
|
90 |
+
def _set_comm_hook_name(self, comm_hook: str) -> None: ...
|
91 |
+
def _set_uneven_input_join(self) -> None: ...
|
92 |
+
def _set_static_graph(self) -> None: ...
|
93 |
+
|
94 |
+
def get_debug_level(): ...
|
95 |
+
def set_debug_level(): ...
|
96 |
+
def set_debug_level_from_env(): ...
|
97 |
+
|
98 |
+
class DebugLevel(Enum):
|
99 |
+
OFF = ...
|
100 |
+
INFO = ...
|
101 |
+
DETAIL = ...
|
102 |
+
|
103 |
+
class ReduceOp:
|
104 |
+
def __init__(self, op: RedOpType): ...
|
105 |
+
|
106 |
+
SUM: RedOpType = ...
|
107 |
+
AVG: RedOpType = ...
|
108 |
+
PRODUCT: RedOpType = ...
|
109 |
+
MIN: RedOpType = ...
|
110 |
+
MAX: RedOpType = ...
|
111 |
+
BAND: RedOpType = ...
|
112 |
+
BOR: RedOpType = ...
|
113 |
+
BXOR: RedOpType = ...
|
114 |
+
PREMUL_SUM: RedOpType = ...
|
115 |
+
UNUSED: RedOpType = ...
|
116 |
+
|
117 |
+
class RedOpType(Enum): ...
|
118 |
+
|
119 |
+
class BroadcastOptions:
|
120 |
+
rootRank: int
|
121 |
+
rootTensor: int
|
122 |
+
timeout: timedelta
|
123 |
+
asyncOp: bool
|
124 |
+
|
125 |
+
class AllreduceOptions:
|
126 |
+
reduceOp: ReduceOp
|
127 |
+
timeout: timedelta
|
128 |
+
|
129 |
+
class AllreduceCoalescedOptions(AllreduceOptions): ...
|
130 |
+
|
131 |
+
class ReduceOptions:
|
132 |
+
reduceOp: ReduceOp
|
133 |
+
rootRank: int
|
134 |
+
rootTensor: int
|
135 |
+
timeout: timedelta
|
136 |
+
|
137 |
+
class AllgatherOptions:
|
138 |
+
timeout: timedelta
|
139 |
+
asyncOp: bool
|
140 |
+
|
141 |
+
class GatherOptions:
|
142 |
+
rootRank: int
|
143 |
+
timeout: timedelta
|
144 |
+
|
145 |
+
class ScatterOptions:
|
146 |
+
rootRank: int
|
147 |
+
timeout: timedelta
|
148 |
+
asyncOp: bool
|
149 |
+
|
150 |
+
class ReduceScatterOptions:
|
151 |
+
reduceOp: ReduceOp
|
152 |
+
timeout: timedelta
|
153 |
+
asyncOp: bool
|
154 |
+
|
155 |
+
class BarrierOptions:
|
156 |
+
device_ids: List[int]
|
157 |
+
timeout: timedelta
|
158 |
+
|
159 |
+
class AllToAllOptions:
|
160 |
+
timeout: timedelta
|
161 |
+
|
162 |
+
class Store:
|
163 |
+
def set(self, key: str, value: str): ...
|
164 |
+
def get(self, key: str) -> bytes: ...
|
165 |
+
def add(self, key: str, value: int) -> int: ...
|
166 |
+
def compare_set(
|
167 |
+
self,
|
168 |
+
key: str,
|
169 |
+
expected_value: str,
|
170 |
+
desired_value: str,
|
171 |
+
) -> bytes: ...
|
172 |
+
def delete_key(self, key: str) -> bool: ...
|
173 |
+
def num_keys(self) -> int: ...
|
174 |
+
def set_timeout(self, timeout: timedelta): ...
|
175 |
+
@overload
|
176 |
+
def wait(self, keys: List[str]): ...
|
177 |
+
@overload
|
178 |
+
def wait(self, keys: List[str], timeout: timedelta): ...
|
179 |
+
|
180 |
+
class FileStore(Store):
|
181 |
+
def __init__(self, path: str, numWorkers: int = ...): ...
|
182 |
+
|
183 |
+
class HashStore(Store):
|
184 |
+
def __init__(self): ...
|
185 |
+
|
186 |
+
class TCPStore(Store):
|
187 |
+
def __init__(
|
188 |
+
self,
|
189 |
+
host_name: str,
|
190 |
+
port: int,
|
191 |
+
world_size: Optional[int] = ...,
|
192 |
+
is_master: bool = ...,
|
193 |
+
timeout: timedelta = ...,
|
194 |
+
wait_for_workers: bool = ...,
|
195 |
+
multi_tenant: bool = ...,
|
196 |
+
master_listen_fd: Optional[int] = ...,
|
197 |
+
use_libuv: Optional[bool] = ...,
|
198 |
+
): ...
|
199 |
+
@property
|
200 |
+
def host(self) -> str: ...
|
201 |
+
@property
|
202 |
+
def port(self) -> int: ...
|
203 |
+
|
204 |
+
class PrefixStore(Store):
|
205 |
+
def __init__(self, prefix: str, store: Store): ...
|
206 |
+
@property
|
207 |
+
def underlying_store(self) -> Store: ...
|
208 |
+
|
209 |
+
class Work:
|
210 |
+
def is_completed(self) -> bool: ...
|
211 |
+
def is_success(self) -> bool: ...
|
212 |
+
def exception(self) -> Any: ...
|
213 |
+
def wait(self, timeout: timedelta = ...) -> bool: ...
|
214 |
+
def source_rank(self) -> int: ...
|
215 |
+
def _source_rank(self) -> int: ...
|
216 |
+
def result(self) -> List[Tensor]: ...
|
217 |
+
def synchronize(self): ...
|
218 |
+
def boxed(self) -> ScriptObject: ...
|
219 |
+
@staticmethod
|
220 |
+
def unbox(obj: ScriptObject) -> Work: ...
|
221 |
+
|
222 |
+
class ProcessGroup:
|
223 |
+
class Options: ...
|
224 |
+
|
225 |
+
def __init__(self): ...
|
226 |
+
def rank(self) -> int: ...
|
227 |
+
def size(self) -> int: ...
|
228 |
+
@overload
|
229 |
+
def broadcast(
|
230 |
+
self,
|
231 |
+
tensors: List[Tensor],
|
232 |
+
opts=...,
|
233 |
+
) -> Work: ...
|
234 |
+
@overload
|
235 |
+
def broadcast(
|
236 |
+
self,
|
237 |
+
tensor: Tensor,
|
238 |
+
root: int,
|
239 |
+
) -> Work: ...
|
240 |
+
@overload
|
241 |
+
def allreduce(
|
242 |
+
self,
|
243 |
+
tensors: List[Tensor],
|
244 |
+
opts: AllreduceOptions = ...,
|
245 |
+
) -> Work: ...
|
246 |
+
@overload
|
247 |
+
def allreduce(
|
248 |
+
self,
|
249 |
+
tensors: List[Tensor],
|
250 |
+
op=...,
|
251 |
+
) -> Work: ...
|
252 |
+
@overload
|
253 |
+
def allreduce(
|
254 |
+
self,
|
255 |
+
tensor: Tensor,
|
256 |
+
op=...,
|
257 |
+
) -> Work: ...
|
258 |
+
def allreduce_coalesced(
|
259 |
+
self,
|
260 |
+
tensors: List[Tensor],
|
261 |
+
opts=...,
|
262 |
+
) -> Work: ...
|
263 |
+
@overload
|
264 |
+
def reduce(
|
265 |
+
self,
|
266 |
+
tensors: List[Tensor],
|
267 |
+
opts=...,
|
268 |
+
) -> Work: ...
|
269 |
+
@overload
|
270 |
+
def reduce(
|
271 |
+
self,
|
272 |
+
tensor: Tensor,
|
273 |
+
root: int,
|
274 |
+
op=...,
|
275 |
+
) -> Work: ...
|
276 |
+
@overload
|
277 |
+
def allgather(
|
278 |
+
self,
|
279 |
+
output_tensors: List[List[Tensor]],
|
280 |
+
input_tensors: List[Tensor],
|
281 |
+
opts=...,
|
282 |
+
) -> Work: ...
|
283 |
+
@overload
|
284 |
+
def allgather(
|
285 |
+
self,
|
286 |
+
output_tensors: List[Tensor],
|
287 |
+
input_tensor: Tensor,
|
288 |
+
) -> Work: ...
|
289 |
+
def _allgather_base(
|
290 |
+
self,
|
291 |
+
output: Tensor,
|
292 |
+
input: Tensor,
|
293 |
+
opts=...,
|
294 |
+
) -> Work: ...
|
295 |
+
def allgather_coalesced(
|
296 |
+
self,
|
297 |
+
output_lists: List[List[Tensor]],
|
298 |
+
input_list: List[Tensor],
|
299 |
+
opts=...,
|
300 |
+
) -> Work: ...
|
301 |
+
@overload
|
302 |
+
def gather(
|
303 |
+
self,
|
304 |
+
output_tensors: List[List[Tensor]],
|
305 |
+
input_tensors: List[Tensor],
|
306 |
+
opts=...,
|
307 |
+
) -> Work: ...
|
308 |
+
@overload
|
309 |
+
def gather(
|
310 |
+
self,
|
311 |
+
output_tensors: List[Tensor],
|
312 |
+
input_tensor: Tensor,
|
313 |
+
root: int,
|
314 |
+
) -> Work: ...
|
315 |
+
@overload
|
316 |
+
def scatter(
|
317 |
+
self,
|
318 |
+
output_tensors: List[Tensor],
|
319 |
+
input_tensors: List[List[Tensor]],
|
320 |
+
opts=...,
|
321 |
+
) -> Work: ...
|
322 |
+
@overload
|
323 |
+
def scatter(
|
324 |
+
self,
|
325 |
+
output_tensor: Tensor,
|
326 |
+
input_tensors: List[Tensor],
|
327 |
+
root: int,
|
328 |
+
) -> Work: ...
|
329 |
+
@overload
|
330 |
+
def reduce_scatter(
|
331 |
+
self,
|
332 |
+
output_tensors: List[Tensor],
|
333 |
+
input_tensors: List[List[Tensor]],
|
334 |
+
opts=...,
|
335 |
+
) -> Work: ...
|
336 |
+
@overload
|
337 |
+
def reduce_scatter(
|
338 |
+
self,
|
339 |
+
output_tensors: Tensor,
|
340 |
+
input_tensor: List[Tensor],
|
341 |
+
) -> Work: ...
|
342 |
+
def _reduce_scatter_base(
|
343 |
+
self,
|
344 |
+
outputTensor: Tensor,
|
345 |
+
inputTensor: Tensor,
|
346 |
+
) -> Work: ...
|
347 |
+
@overload
|
348 |
+
def alltoall_base(
|
349 |
+
self,
|
350 |
+
output_tensor: Tensor,
|
351 |
+
input_tensor: Tensor,
|
352 |
+
output_split_sizes: List[int],
|
353 |
+
input_split_sizes: List[int],
|
354 |
+
opts=...,
|
355 |
+
) -> Work: ...
|
356 |
+
@overload
|
357 |
+
def alltoall_base(
|
358 |
+
self,
|
359 |
+
output: Tensor,
|
360 |
+
input: Tensor,
|
361 |
+
output_split_sizes: List[int],
|
362 |
+
input_split_sizes: List[int],
|
363 |
+
) -> Work: ...
|
364 |
+
@overload
|
365 |
+
def alltoall(
|
366 |
+
self,
|
367 |
+
output_tensor: List[Tensor],
|
368 |
+
input_tensor: List[Tensor],
|
369 |
+
opts=...,
|
370 |
+
) -> Work: ...
|
371 |
+
@overload
|
372 |
+
def alltoall(
|
373 |
+
self,
|
374 |
+
output: List[Tensor],
|
375 |
+
input: List[Tensor],
|
376 |
+
) -> Work: ...
|
377 |
+
def send(
|
378 |
+
self,
|
379 |
+
tensors: List[Tensor],
|
380 |
+
dstRank: int,
|
381 |
+
tag: int,
|
382 |
+
) -> Work: ...
|
383 |
+
def recv(
|
384 |
+
self,
|
385 |
+
tensors: List[Tensor],
|
386 |
+
srcRank: int,
|
387 |
+
tag: int,
|
388 |
+
) -> Work: ...
|
389 |
+
def recv_anysource(self, tensors: List[Tensor], tag: int) -> Work: ...
|
390 |
+
def barrier(self, opts=...) -> Work: ...
|
391 |
+
def boxed(self) -> ScriptObject: ...
|
392 |
+
@staticmethod
|
393 |
+
def unbox(obj: ScriptObject) -> ProcessGroup: ...
|
394 |
+
|
395 |
+
class ProcessGroupRoundRobin(ProcessGroup): ...
|
396 |
+
|
397 |
+
def _round_robin_process_groups(
|
398 |
+
process_groups: List[ProcessGroup],
|
399 |
+
) -> ProcessGroupRoundRobin: ...
|
400 |
+
|
401 |
+
class ProcessGroupGloo(ProcessGroup):
|
402 |
+
class Device: ...
|
403 |
+
class Options: ...
|
404 |
+
|
405 |
+
def __init__(
|
406 |
+
self,
|
407 |
+
store: Store,
|
408 |
+
rank: int,
|
409 |
+
size: int,
|
410 |
+
timeout: timedelta,
|
411 |
+
): ...
|
412 |
+
@staticmethod
|
413 |
+
def create_device(hostname="", interface="") -> Device: ...
|
414 |
+
@staticmethod
|
415 |
+
def create_default_device() -> Device: ...
|
416 |
+
|
417 |
+
class _ProcessGroupWrapper(ProcessGroup):
|
418 |
+
def __init__(self, pg: ProcessGroup, gloo_pg: ProcessGroupGloo): ...
|
419 |
+
wrapped_pg: ProcessGroup
|
420 |
+
|
421 |
+
class ProcessGroupNCCL(ProcessGroup):
|
422 |
+
class Options: ...
|
423 |
+
|
424 |
+
def __init__(
|
425 |
+
self,
|
426 |
+
store: Store,
|
427 |
+
rank: int,
|
428 |
+
size: int,
|
429 |
+
timeout: timedelta,
|
430 |
+
): ...
|
431 |
+
def _group_start(self) -> None: ...
|
432 |
+
def _group_end(self) -> None: ...
|
433 |
+
|
434 |
+
class ProcessGroupUCC(ProcessGroup):
|
435 |
+
def __init__(
|
436 |
+
self,
|
437 |
+
store: Store,
|
438 |
+
rank: int,
|
439 |
+
size: int,
|
440 |
+
timeout: timedelta,
|
441 |
+
): ...
|
442 |
+
|
443 |
+
class ProcessGroupMPI(ProcessGroup):
|
444 |
+
def __init__(
|
445 |
+
self,
|
446 |
+
rank: int,
|
447 |
+
size: int,
|
448 |
+
pgComm: int,
|
449 |
+
): ...
|
450 |
+
@staticmethod
|
451 |
+
def create(ranks: List[int]) -> ProcessGroupMPI: ...
|
452 |
+
|
453 |
+
def _compute_bucket_assignment_by_size(
|
454 |
+
tensors: List[Tensor],
|
455 |
+
bucket_size_limits: List[int],
|
456 |
+
expect_sparse_gradient: List[bool] = ...,
|
457 |
+
tensor_indices: List[int] = ...,
|
458 |
+
) -> Tuple[List[List[int]], List[int]]: ...
|
459 |
+
def _broadcast_coalesced(
|
460 |
+
process_group: ProcessGroup,
|
461 |
+
tensors: List[Tensor],
|
462 |
+
buffer_size: int,
|
463 |
+
src: int,
|
464 |
+
): ...
|
465 |
+
def _test_python_store(store: Store): ...
|
466 |
+
def _verify_params_across_processes(
|
467 |
+
process_group: ProcessGroup,
|
468 |
+
params: List[Tensor],
|
469 |
+
logger: Optional[Logger],
|
470 |
+
): ...
|
471 |
+
def _make_nccl_premul_sum(factor: Union[float, List[Tensor]]) -> ReduceOp: ...
|
472 |
+
|
473 |
+
class Backend:
|
474 |
+
def __init__(
|
475 |
+
self,
|
476 |
+
rank: int,
|
477 |
+
size: int,
|
478 |
+
): ...
|
env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_rpc_testing.pyi
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Dict, List
|
2 |
+
|
3 |
+
import torch
|
4 |
+
|
5 |
+
from ._distributed_c10d import Store
|
6 |
+
from ._distributed_rpc import _TensorPipeRpcBackendOptionsBase, TensorPipeAgent
|
7 |
+
|
8 |
+
# This module is defined in torch/csrc/distributed/rpc/testing/init.cpp
|
9 |
+
|
10 |
+
class FaultyTensorPipeRpcBackendOptions(_TensorPipeRpcBackendOptionsBase):
|
11 |
+
def __init__(
|
12 |
+
self,
|
13 |
+
num_worker_threads: int,
|
14 |
+
rpc_timeout: float,
|
15 |
+
init_method: str,
|
16 |
+
messages_to_fail: List[str],
|
17 |
+
messages_to_delay: Dict[str, float],
|
18 |
+
num_fail_sends: int,
|
19 |
+
): ...
|
20 |
+
num_send_recv_threads: int
|
21 |
+
messages_to_fail: List[str]
|
22 |
+
messages_to_delay: Dict[str, float]
|
23 |
+
num_fail_sends: int
|
24 |
+
|
25 |
+
class FaultyTensorPipeAgent(TensorPipeAgent):
|
26 |
+
def __init__(
|
27 |
+
self,
|
28 |
+
store: Store,
|
29 |
+
name: str,
|
30 |
+
rank: int,
|
31 |
+
world_size: int,
|
32 |
+
options: FaultyTensorPipeRpcBackendOptions,
|
33 |
+
reverse_device_maps: Dict[str, Dict[torch.device, torch.device]],
|
34 |
+
devices: List[torch.device],
|
35 |
+
): ...
|
env-llmeval/lib/python3.10/site-packages/torch/_C/_functorch.pyi
ADDED
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from enum import Enum
|
2 |
+
from typing import Optional, Tuple
|
3 |
+
|
4 |
+
from torch import Tensor
|
5 |
+
|
6 |
+
# Defined in torch/csrc/functorch/init.cpp
|
7 |
+
|
8 |
+
def _set_dynamic_layer_keys_included(included: bool) -> None: ...
|
9 |
+
def get_unwrapped(tensor: Tensor) -> Tensor: ...
|
10 |
+
def is_batchedtensor(tensor: Tensor) -> bool: ...
|
11 |
+
def is_functionaltensor(tensor: Tensor) -> bool: ...
|
12 |
+
def is_functorch_wrapped_tensor(tensor: Tensor) -> bool: ...
|
13 |
+
def is_gradtrackingtensor(tensor: Tensor) -> bool: ...
|
14 |
+
def maybe_get_bdim(tensor: Tensor) -> int: ...
|
15 |
+
def maybe_get_level(tensor: Tensor) -> int: ...
|
16 |
+
def unwrap_if_dead(tensor: Tensor) -> Tensor: ...
|
17 |
+
def _unwrap_for_grad(tensor: Tensor, level: int) -> Tensor: ...
|
18 |
+
def _wrap_for_grad(tensor: Tensor, level: int) -> Tensor: ...
|
19 |
+
def _unwrap_batched(tensor: Tensor, level: int) -> Tuple[Tensor, Optional[int]]: ...
|
20 |
+
def current_level() -> int: ...
|
21 |
+
def _add_batch_dim(tensor: Tensor, bdim: int, level: int) -> Tensor: ...
|
22 |
+
def set_single_level_autograd_function_allowed(allowed: bool) -> None: ...
|
23 |
+
def get_single_level_autograd_function_allowed() -> bool: ...
|
24 |
+
def _unwrap_functional_tensor(tensor: Tensor, reapply_views: bool) -> Tensor: ...
|
25 |
+
def _wrap_functional_tensor(tensor: Tensor, level: int) -> Tensor: ...
|
26 |
+
|
27 |
+
# Defined in aten/src/ATen/functorch/Interpreter.h
|
28 |
+
class TransformType(Enum):
|
29 |
+
Torch: TransformType = ...
|
30 |
+
Vmap: TransformType = ...
|
31 |
+
Grad: TransformType = ...
|
32 |
+
Jvp: TransformType = ...
|
33 |
+
Functionalize: TransformType = ...
|
34 |
+
|
35 |
+
class RandomnessType(Enum):
|
36 |
+
Error: TransformType = ...
|
37 |
+
Same: TransformType = ...
|
38 |
+
Different: TransformType = ...
|
39 |
+
|
40 |
+
class CInterpreter:
|
41 |
+
def key(self) -> TransformType: ...
|
42 |
+
def level(self) -> int: ...
|
43 |
+
|
44 |
+
class CGradInterpreterPtr:
|
45 |
+
def __init__(self, interpreter: CInterpreter): ...
|
46 |
+
def lift(self, Tensor) -> Tensor: ...
|
47 |
+
def prevGradMode(self) -> bool: ...
|
48 |
+
|
49 |
+
class CJvpInterpreterPtr:
|
50 |
+
def __init__(self, interpreter: CInterpreter): ...
|
51 |
+
def lift(self, Tensor) -> Tensor: ...
|
52 |
+
def prevFwdGradMode(self) -> bool: ...
|
53 |
+
|
54 |
+
class CFunctionalizeInterpreterPtr:
|
55 |
+
def __init__(self, interpreter: CInterpreter): ...
|
56 |
+
def key(self) -> TransformType: ...
|
57 |
+
def level(self) -> int: ...
|
58 |
+
def functionalizeAddBackViews(self) -> bool: ...
|
59 |
+
|
60 |
+
class CVmapInterpreterPtr:
|
61 |
+
def __init__(self, interpreter: CInterpreter): ...
|
62 |
+
def key(self) -> TransformType: ...
|
63 |
+
def level(self) -> int: ...
|
64 |
+
def batchSize(self) -> int: ...
|
65 |
+
def randomness(self) -> RandomnessType: ...
|
66 |
+
|
67 |
+
class DynamicLayer: ...
|
68 |
+
|
69 |
+
def peek_interpreter_stack() -> CInterpreter: ...
|
70 |
+
def pop_dynamic_layer_stack() -> DynamicLayer: ...
|
71 |
+
def push_dynamic_layer_stack(dl: DynamicLayer) -> int: ...
|
env-llmeval/lib/python3.10/site-packages/torch/_C/_itt.pyi
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Defined in torch/csrc/itt.cpp
|
2 |
+
def is_available() -> None: ...
|
3 |
+
def rangePush(message: str) -> None: ...
|
4 |
+
def rangePop() -> None: ...
|
5 |
+
def mark(message: str) -> None: ...
|
env-llmeval/lib/python3.10/site-packages/torch/_C/_lazy.pyi
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import List
|
2 |
+
|
3 |
+
from torch import Tensor
|
4 |
+
|
5 |
+
# defined in torch/csrc/lazy/python/init.cpp
|
6 |
+
def _mark_step(device: str, devices: List[str], wait: bool): ...
|
7 |
+
def _wait_device_ops(devices: List[str]): ...
|
8 |
+
def _reset_metrics(): ...
|
9 |
+
def _counter_names() -> List[str]: ...
|
10 |
+
def _counter_value(name: str) -> int: ...
|
11 |
+
def _metrics_report() -> str: ...
|
12 |
+
def _get_graph_hash(tensors: List[Tensor]) -> str: ...
|
13 |
+
def _sync_multi(
|
14 |
+
tensors: List[Tensor],
|
15 |
+
devices: List[str],
|
16 |
+
wait: bool = True,
|
17 |
+
sync_ltc_data: bool = True,
|
18 |
+
): ...
|
19 |
+
def _get_tensor_id(tensor: Tensor) -> int: ...
|
20 |
+
def _get_tensors_text(tensors: List[Tensor]) -> str: ...
|
21 |
+
def _get_tensors_dot(tensors: List[Tensor]) -> str: ...
|
22 |
+
def _get_tensors_backend(tensors: List[Tensor]) -> str: ...
|
23 |
+
def _get_force_fallback() -> str: ...
|
24 |
+
def _set_force_fallback(newval: str): ...
|
25 |
+
def _clear_ir_cache(): ...
|
26 |
+
def _dump_ir_cache(filename: str): ...
|
27 |
+
def _set_reuse_ir(val: bool): ...
|
28 |
+
def _get_default_device_type(): ...
|
env-llmeval/lib/python3.10/site-packages/torch/_C/_lazy_ts_backend.pyi
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# defined in torch/csrc/lazy/python/init.cpp
|
2 |
+
|
3 |
+
from typing import Any, List, Tuple
|
4 |
+
|
5 |
+
from torch import Tensor
|
6 |
+
|
7 |
+
def _init(): ...
|
8 |
+
def _get_tensors_ts_device_data_node(
|
9 |
+
tensors: List[Tensor],
|
10 |
+
) -> Tuple[List[int], List[Any]]: ...
|
11 |
+
def _run_cached_graph(hash_str: str, graph_inputs: List[Any]) -> List[Tensor]: ...
|
env-llmeval/lib/python3.10/site-packages/torch/_C/_monitor.pyi
ADDED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Defined in torch/csrc/monitor/python_init.cpp
|
2 |
+
|
3 |
+
import datetime
|
4 |
+
from enum import Enum
|
5 |
+
from typing import Callable, Dict, List, Union
|
6 |
+
|
7 |
+
class Aggregation(Enum):
|
8 |
+
VALUE = ...
|
9 |
+
MEAN = ...
|
10 |
+
COUNT = ...
|
11 |
+
SUM = ...
|
12 |
+
MAX = ...
|
13 |
+
MIN = ...
|
14 |
+
|
15 |
+
class Stat:
|
16 |
+
name: str
|
17 |
+
count: int
|
18 |
+
def __init__(
|
19 |
+
self,
|
20 |
+
name: str,
|
21 |
+
aggregations: List[Aggregation],
|
22 |
+
window_size: int,
|
23 |
+
max_samples: int = -1,
|
24 |
+
) -> None: ...
|
25 |
+
def add(self, v: float) -> None: ...
|
26 |
+
def get(self) -> Dict[Aggregation, float]: ...
|
27 |
+
|
28 |
+
class Event:
|
29 |
+
name: str
|
30 |
+
timestamp: datetime.datetime
|
31 |
+
data: Dict[str, Union[int, float, bool, str]]
|
32 |
+
def __init__(
|
33 |
+
self,
|
34 |
+
name: str,
|
35 |
+
timestamp: datetime.datetime,
|
36 |
+
data: Dict[str, Union[int, float, bool, str]],
|
37 |
+
) -> None: ...
|
38 |
+
|
39 |
+
def log_event(e: Event) -> None: ...
|
40 |
+
|
41 |
+
class EventHandlerHandle: ...
|
42 |
+
|
43 |
+
def register_event_handler(handler: Callable[[Event], None]) -> EventHandlerHandle: ...
|
44 |
+
def unregister_event_handler(handle: EventHandlerHandle) -> None: ...
|
env-llmeval/lib/python3.10/site-packages/torch/_C/_profiler.pyi
ADDED
@@ -0,0 +1,238 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from enum import Enum
|
2 |
+
from typing import Any, Dict, List, Literal, Optional, Tuple, Union
|
3 |
+
|
4 |
+
from torch._C import device, dtype, layout
|
5 |
+
from typing_extensions import TypeAlias
|
6 |
+
|
7 |
+
# defined in torch/csrc/profiler/python/init.cpp
|
8 |
+
|
9 |
+
class RecordScope(Enum):
|
10 |
+
FUNCTION = ...
|
11 |
+
BACKWARD_FUNCTION = ...
|
12 |
+
TORCHSCRIPT_FUNCTION = ...
|
13 |
+
KERNEL_FUNCTION_DTYPE = ...
|
14 |
+
CUSTOM_CLASS = ...
|
15 |
+
BUILD_FEATURE = ...
|
16 |
+
LITE_INTERPRETER = ...
|
17 |
+
USER_SCOPE = ...
|
18 |
+
STATIC_RUNTIME_OP = ...
|
19 |
+
STATIC_RUNTIME_MODEL = ...
|
20 |
+
|
21 |
+
class ProfilerState(Enum):
|
22 |
+
Disable = ...
|
23 |
+
CPU = ...
|
24 |
+
CUDA = ...
|
25 |
+
NVTX = ...
|
26 |
+
ITT = ...
|
27 |
+
KINETO = ...
|
28 |
+
KINETO_GPU_FALLBACK = ...
|
29 |
+
KINETO_PRIVATEUSE1_FALLBACK = ...
|
30 |
+
KINETO_PRIVATEUSE1 = ...
|
31 |
+
|
32 |
+
class ActiveProfilerType(Enum):
|
33 |
+
NONE = ...
|
34 |
+
LEGACY = ...
|
35 |
+
KINETO = ...
|
36 |
+
NVTX = ...
|
37 |
+
ITT = ...
|
38 |
+
|
39 |
+
class ProfilerActivity(Enum):
|
40 |
+
CPU = ...
|
41 |
+
CUDA = ...
|
42 |
+
MTIA = ...
|
43 |
+
PrivateUse1 = ...
|
44 |
+
|
45 |
+
class _EventType(Enum):
|
46 |
+
TorchOp = ...
|
47 |
+
Backend = ...
|
48 |
+
Allocation = ...
|
49 |
+
OutOfMemory = ...
|
50 |
+
PyCall = ...
|
51 |
+
PyCCall = ...
|
52 |
+
Kineto = ...
|
53 |
+
|
54 |
+
class _ExperimentalConfig:
|
55 |
+
def __init__(
|
56 |
+
self,
|
57 |
+
profiler_metrics: List[str] = ...,
|
58 |
+
profiler_measure_per_kernel: bool = ...,
|
59 |
+
verbose: bool = ...,
|
60 |
+
performance_events: List[str] = ...,
|
61 |
+
enable_cuda_sync_events: bool = ...,
|
62 |
+
) -> None: ...
|
63 |
+
|
64 |
+
class ProfilerConfig:
|
65 |
+
def __init__(
|
66 |
+
self,
|
67 |
+
state: ProfilerState,
|
68 |
+
report_input_shapes: bool,
|
69 |
+
profile_memory: bool,
|
70 |
+
with_stack: bool,
|
71 |
+
with_flops: bool,
|
72 |
+
with_modules: bool,
|
73 |
+
experimental_config: _ExperimentalConfig,
|
74 |
+
) -> None: ...
|
75 |
+
|
76 |
+
class _ProfilerEvent:
|
77 |
+
start_tid: int
|
78 |
+
start_time_ns: int
|
79 |
+
children: List[_ProfilerEvent]
|
80 |
+
|
81 |
+
# TODO(robieta): remove in favor of `self.typed`
|
82 |
+
extra_fields: Union[
|
83 |
+
_ExtraFields_TorchOp,
|
84 |
+
_ExtraFields_Backend,
|
85 |
+
_ExtraFields_Allocation,
|
86 |
+
_ExtraFields_OutOfMemory,
|
87 |
+
_ExtraFields_PyCall,
|
88 |
+
_ExtraFields_PyCCall,
|
89 |
+
_ExtraFields_Kineto,
|
90 |
+
]
|
91 |
+
|
92 |
+
@property
|
93 |
+
def typed(
|
94 |
+
self,
|
95 |
+
) -> Union[
|
96 |
+
Tuple[Literal[_EventType.TorchOp], _ExtraFields_TorchOp],
|
97 |
+
Tuple[Literal[_EventType.Backend], _ExtraFields_Backend],
|
98 |
+
Tuple[Literal[_EventType.Allocation], _ExtraFields_Allocation],
|
99 |
+
Tuple[Literal[_EventType.OutOfMemory], _ExtraFields_OutOfMemory],
|
100 |
+
Tuple[Literal[_EventType.PyCall], _ExtraFields_PyCall],
|
101 |
+
Tuple[Literal[_EventType.PyCCall], _ExtraFields_PyCCall],
|
102 |
+
Tuple[Literal[_EventType.Kineto], _ExtraFields_Kineto],
|
103 |
+
]: ...
|
104 |
+
@property
|
105 |
+
def name(self) -> str: ...
|
106 |
+
@property
|
107 |
+
def tag(self) -> _EventType: ...
|
108 |
+
@property
|
109 |
+
def id(self) -> int: ...
|
110 |
+
@property
|
111 |
+
def parent(self) -> Optional[_ProfilerEvent]: ...
|
112 |
+
@property
|
113 |
+
def correlation_id(self) -> int: ...
|
114 |
+
@property
|
115 |
+
def end_time_ns(self) -> int: ...
|
116 |
+
@property
|
117 |
+
def duration_time_ns(self) -> int: ...
|
118 |
+
|
119 |
+
class _TensorMetadata:
|
120 |
+
impl_ptr: Optional[int]
|
121 |
+
storage_data_ptr: Optional[int]
|
122 |
+
id: Optional[int]
|
123 |
+
|
124 |
+
@property
|
125 |
+
def allocation_id(self) -> Optional[int]: ...
|
126 |
+
@property
|
127 |
+
def layout(self) -> layout: ...
|
128 |
+
@property
|
129 |
+
def device(self) -> device: ...
|
130 |
+
@property
|
131 |
+
def dtype(self) -> dtype: ...
|
132 |
+
@property
|
133 |
+
def sizes(self) -> List[int]: ...
|
134 |
+
@property
|
135 |
+
def strides(self) -> List[int]: ...
|
136 |
+
|
137 |
+
Scalar: TypeAlias = Union[int, float, bool, complex]
|
138 |
+
Input: TypeAlias = Optional[Union[_TensorMetadata, List[_TensorMetadata], Scalar]]
|
139 |
+
|
140 |
+
class _ExtraFields_TorchOp:
|
141 |
+
name: str
|
142 |
+
sequence_number: int
|
143 |
+
allow_tf32_cublas: bool
|
144 |
+
|
145 |
+
@property
|
146 |
+
def inputs(self) -> List[Input]: ...
|
147 |
+
@property
|
148 |
+
def scope(self) -> RecordScope: ...
|
149 |
+
|
150 |
+
class _ExtraFields_Backend: ...
|
151 |
+
|
152 |
+
class _ExtraFields_Allocation:
|
153 |
+
ptr: int
|
154 |
+
id: Optional[int]
|
155 |
+
alloc_size: int
|
156 |
+
total_allocated: int
|
157 |
+
total_reserved: int
|
158 |
+
|
159 |
+
@property
|
160 |
+
def allocation_id(self) -> Optional[int]: ...
|
161 |
+
@property
|
162 |
+
def device(self) -> device: ...
|
163 |
+
|
164 |
+
class _ExtraFields_OutOfMemory: ...
|
165 |
+
|
166 |
+
class _PyFrameState:
|
167 |
+
line_number: int
|
168 |
+
function_name: str
|
169 |
+
|
170 |
+
@property
|
171 |
+
def file_name(self) -> str: ...
|
172 |
+
|
173 |
+
class _NNModuleInfo:
|
174 |
+
@property
|
175 |
+
def self_ptr(self) -> int: ...
|
176 |
+
@property
|
177 |
+
def cls_ptr(self) -> int: ...
|
178 |
+
@property
|
179 |
+
def cls_name(self) -> str: ...
|
180 |
+
@property
|
181 |
+
def parameters(
|
182 |
+
self,
|
183 |
+
) -> List[Tuple[str, _TensorMetadata, Optional[_TensorMetadata]]]: ...
|
184 |
+
|
185 |
+
class _OptimizerInfo:
|
186 |
+
@property
|
187 |
+
def parameters(
|
188 |
+
self,
|
189 |
+
) -> List[
|
190 |
+
Tuple[
|
191 |
+
# Parameter
|
192 |
+
_TensorMetadata,
|
193 |
+
#
|
194 |
+
# Gradient (if present during optimizer.step())
|
195 |
+
Optional[_TensorMetadata],
|
196 |
+
#
|
197 |
+
# Optimizer state for Parameter as (name, tensor) pairs
|
198 |
+
List[Tuple[str, _TensorMetadata]],
|
199 |
+
]
|
200 |
+
]: ...
|
201 |
+
|
202 |
+
class _ExtraFields_PyCCall:
|
203 |
+
@property
|
204 |
+
def caller(self) -> _PyFrameState: ...
|
205 |
+
|
206 |
+
class _ExtraFields_PyCall:
|
207 |
+
@property
|
208 |
+
def callsite(self) -> _PyFrameState: ...
|
209 |
+
@property
|
210 |
+
def caller(self) -> _PyFrameState: ...
|
211 |
+
@property
|
212 |
+
def module(self) -> Optional[_NNModuleInfo]: ...
|
213 |
+
@property
|
214 |
+
def optimizer(self) -> Optional[_OptimizerInfo]: ...
|
215 |
+
|
216 |
+
class _ExtraFields_Kineto: ...
|
217 |
+
|
218 |
+
def _add_execution_trace_observer(output_file_path: str) -> bool: ...
|
219 |
+
def _remove_execution_trace_observer() -> None: ...
|
220 |
+
def _enable_execution_trace_observer() -> None: ...
|
221 |
+
def _disable_execution_trace_observer() -> None: ...
|
222 |
+
def _set_record_concrete_inputs_enabled_val(val: bool) -> None: ...
|
223 |
+
def _set_fwd_bwd_enabled_val(val: bool) -> None: ...
|
224 |
+
def _set_cuda_sync_enabled_val(val: bool) -> None: ...
|
225 |
+
|
226 |
+
class CapturedTraceback: ...
|
227 |
+
|
228 |
+
def gather_traceback(python: bool, script: bool, cpp: bool) -> CapturedTraceback: ...
|
229 |
+
|
230 |
+
# The Dict has name, filename, line
|
231 |
+
def symbolize_tracebacks(
|
232 |
+
to_symbolize: List[CapturedTraceback],
|
233 |
+
) -> List[List[Dict[str, str]]]: ...
|
234 |
+
|
235 |
+
class _RecordFunctionFast:
|
236 |
+
def __init__(self, name: str) -> None: ...
|
237 |
+
def __enter__(self) -> None: ...
|
238 |
+
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: ...
|
env-llmeval/lib/python3.10/site-packages/torch/_refs/__init__.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
env-llmeval/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (138 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_refs/__pycache__/_conversions.cpython-310.pyc
ADDED
Binary file (2.58 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_refs/__pycache__/fft.cpython-310.pyc
ADDED
Binary file (16 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_refs/_conversions.py
ADDED
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
import torch._prims_common as utils
|
3 |
+
|
4 |
+
# Utilities should come BEFORE this import
|
5 |
+
from torch._decomp import register_decomposition
|
6 |
+
|
7 |
+
from torch._prims_common import TensorLikeType
|
8 |
+
from torch._prims_common.wrappers import out_wrapper
|
9 |
+
from torch._refs import _broadcast_shapes
|
10 |
+
|
11 |
+
# Data conversion references.
|
12 |
+
#
|
13 |
+
# Note: this module breaks the usual _refs to torch naming scheme where
|
14 |
+
# _refs.foo.bar is a ref for torch.foo.bar. The following definitions are not
|
15 |
+
# part of _refs/__init__.py to avoid name clashes with Python builtin types
|
16 |
+
# (like int).
|
17 |
+
|
18 |
+
__all__ = [
|
19 |
+
# dtypes
|
20 |
+
"bfloat16",
|
21 |
+
"bool",
|
22 |
+
"byte",
|
23 |
+
"cdouble",
|
24 |
+
"cfloat",
|
25 |
+
"chalf",
|
26 |
+
"char",
|
27 |
+
"double",
|
28 |
+
"float",
|
29 |
+
"half",
|
30 |
+
"int",
|
31 |
+
"long",
|
32 |
+
"short",
|
33 |
+
# misc
|
34 |
+
"complex",
|
35 |
+
"polar",
|
36 |
+
]
|
37 |
+
|
38 |
+
|
39 |
+
def _make_conversion_method(name: str, dtype: torch.dtype):
|
40 |
+
def fn(
|
41 |
+
self: TensorLikeType, memory_format: torch.memory_format = torch.preserve_format
|
42 |
+
) -> TensorLikeType:
|
43 |
+
return self.to(dtype, memory_format=memory_format) # type: ignore[call-overload]
|
44 |
+
|
45 |
+
fn.__name__ = name
|
46 |
+
return fn
|
47 |
+
|
48 |
+
|
49 |
+
# Refs for the Tensor.bfloat16()/bool()/.../short() conversion methods: each is
# Tensor.to(<dtype>) with an optional memory_format (see _make_conversion_method).
# Several names deliberately shadow Python builtins (bool, int, float), which is
# why they live in this module rather than _refs/__init__.py.
bfloat16 = _make_conversion_method("bfloat16", torch.bfloat16)

bool = _make_conversion_method("bool", torch.bool)

byte = _make_conversion_method("byte", torch.uint8)

cdouble = _make_conversion_method("cdouble", torch.cdouble)

cfloat = _make_conversion_method("cfloat", torch.cfloat)

chalf = _make_conversion_method("chalf", torch.complex32)

char = _make_conversion_method("char", torch.int8)

double = _make_conversion_method("double", torch.double)

float = _make_conversion_method("float", torch.float)

half = _make_conversion_method("half", torch.half)

int = _make_conversion_method("int", torch.int)

long = _make_conversion_method("long", torch.long)

short = _make_conversion_method("short", torch.short)
|
74 |
+
|
75 |
+
|
76 |
+
@register_decomposition(torch._ops.ops.aten.complex)
# Note: complex has type promotion tests disabled due to different semantics.
# exact_dtype is for compat with complex_check_dtype from core.
@out_wrapper(exact_dtype=True)
def complex(real: TensorLikeType, imag: TensorLikeType) -> TensorLikeType:
    """Reference for torch.complex: build a complex tensor from real/imaginary parts.

    Both inputs must have the same dtype, one of half/float/double; the result
    has the corresponding complex dtype and the broadcast of the two shapes.
    """
    allowed_dtypes = (torch.float32, torch.float64, torch.float16)
    torch._check(
        real.dtype in allowed_dtypes and imag.dtype in allowed_dtypes,
        lambda: (
            f"Expected both inputs to be Half, Float or Double tensors but got "
            f"{real.dtype} and {imag.dtype}"
        ),
    )
    torch._check(
        real.dtype == imag.dtype,
        lambda: (
            f"Expected object of scalar type {real.dtype} but got "
            f"scalar type {imag.dtype} for second argument"
        ),
    )
    result_dtype = utils.corresponding_complex_dtype(real.dtype)  # type: ignore[arg-type]
    common_shape = _broadcast_shapes(real.shape, imag.shape)
    result = real.new_empty(
        common_shape,
        dtype=result_dtype,
        layout=real.layout,
        device=real.device,
        # pin_memory=real.is_pinned(), # NYI
    )
    # Assigning through .real/.imag broadcasts each input into the empty output.
    result.real = real
    result.imag = imag
    return result
|
108 |
+
|
109 |
+
|
110 |
+
@register_decomposition(torch._ops.ops.aten.polar)
# Note: polar has type promotion tests disabled due to different semantics.
# exact_dtype is for compat with complex_check_dtype from core.
@out_wrapper(exact_dtype=True)
def polar(abs: TensorLikeType, angle: TensorLikeType) -> TensorLikeType:
    """Reference for torch.polar: complex tensor from magnitude and phase.

    torch.complex performs the dtype/shape validation and allocates the output;
    its values are then overwritten with abs*cos(angle) + i*abs*sin(angle).
    """
    result = torch.complex(abs, angle)
    result.real = abs * torch.cos(angle)
    result.imag = abs * torch.sin(angle)
    return result
|
env-llmeval/lib/python3.10/site-packages/torch/_refs/fft.py
ADDED
@@ -0,0 +1,590 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import math
|
2 |
+
|
3 |
+
from typing import Iterable, List, Literal, NamedTuple, Optional, Sequence, Tuple, Union
|
4 |
+
|
5 |
+
import torch
|
6 |
+
import torch._prims as prims
|
7 |
+
import torch._prims_common as utils
|
8 |
+
from torch._decomp import register_decomposition
|
9 |
+
from torch._prims_common import DimsType, ShapeType, TensorLikeType
|
10 |
+
from torch._prims_common.wrappers import _maybe_convert_to_dtype, out_wrapper
|
11 |
+
|
12 |
+
__all__ = [
|
13 |
+
# Transforms
|
14 |
+
"fft",
|
15 |
+
"fft2",
|
16 |
+
"fftn",
|
17 |
+
"hfft",
|
18 |
+
"hfft2",
|
19 |
+
"hfftn",
|
20 |
+
"rfft",
|
21 |
+
"rfft2",
|
22 |
+
"rfftn",
|
23 |
+
"ifft",
|
24 |
+
"ifft2",
|
25 |
+
"ifftn",
|
26 |
+
"ihfft",
|
27 |
+
"ihfft2",
|
28 |
+
"ihfftn",
|
29 |
+
"irfft",
|
30 |
+
"irfft2",
|
31 |
+
"irfftn",
|
32 |
+
# Helpers
|
33 |
+
"fftshift",
|
34 |
+
"ifftshift",
|
35 |
+
]
|
36 |
+
|
37 |
+
# Accepted values of the fft `norm` argument; None is equivalent to "backward".
NormType = Union[None, Literal["forward", "backward", "ortho"]]
_NORM_VALUES = {None, "forward", "backward", "ortho"}
aten = torch._ops.ops.aten
|
40 |
+
|
41 |
+
|
42 |
+
def _apply_norm(
    x: TensorLikeType, norm: NormType, signal_numel: int, forward: bool
) -> TensorLikeType:
    """Apply normalization to the un-normalized FFT result"""
    torch._check(norm in _NORM_VALUES, lambda: f"Invalid normalization mode: {norm}")

    if norm == "ortho":
        return x * (1 / math.sqrt(signal_numel))

    # "backward" (the default, i.e. norm is None) scales the inverse transform;
    # "forward" scales the forward transform; otherwise no scaling is applied.
    if forward:
        needs_scaling = norm == "forward"
    else:
        needs_scaling = norm is None or norm == "backward"
    return x * (1 / signal_numel) if needs_scaling else x
|
55 |
+
|
56 |
+
|
57 |
+
def _promote_type_fft(
    dtype: torch.dtype, require_complex: bool, device: torch.device
) -> torch.dtype:
    """Helper to promote a dtype to one supported by the FFT primitives.

    Complex dtypes pass through unchanged; integral dtypes are promoted to the
    default float dtype first. If ``require_complex`` is set, the result is the
    complex dtype corresponding to the (possibly promoted) float dtype.
    """
    if dtype.is_complex:
        return dtype

    # Promote integral to default float type
    if not dtype.is_floating_point:
        dtype = torch.get_default_dtype()

    allowed_types = [torch.float32, torch.float64]
    # float16 is only accepted on cuda/meta devices and not under HIP builds.
    maybe_support_half = device.type in ["cuda", "meta"] and not torch.version.hip

    if maybe_support_half:
        allowed_types.append(torch.float16)
    torch._check(dtype in allowed_types, lambda: f"Unsupported dtype {dtype}")

    if require_complex:
        dtype = utils.corresponding_complex_dtype(dtype)

    return dtype
|
79 |
+
|
80 |
+
|
81 |
+
def _maybe_promote_tensor_fft(
    t: TensorLikeType, require_complex: bool = False
) -> TensorLikeType:
    """Helper to promote a tensor to a dtype supported by the FFT primitives"""
    target_dtype = _promote_type_fft(t.dtype, require_complex, t.device)
    return _maybe_convert_to_dtype(t, target_dtype)  # type: ignore[return-value]
|
88 |
+
|
89 |
+
|
90 |
+
def _resize_fft_input(
|
91 |
+
x: TensorLikeType, dims: Tuple[int, ...], sizes: Tuple[int, ...]
|
92 |
+
) -> TensorLikeType:
|
93 |
+
"""
|
94 |
+
Fixes the shape of x such that x.size(dims[i]) == sizes[i],
|
95 |
+
either by zero-padding, or by slicing x starting from 0.
|
96 |
+
"""
|
97 |
+
assert len(dims) == len(sizes)
|
98 |
+
must_copy = False
|
99 |
+
x_sizes = x.shape
|
100 |
+
pad_amount = [0] * len(x_sizes) * 2
|
101 |
+
for i in range(len(dims)):
|
102 |
+
if sizes[i] == -1:
|
103 |
+
continue
|
104 |
+
|
105 |
+
if x_sizes[dims[i]] < sizes[i]:
|
106 |
+
must_copy = True
|
107 |
+
pad_idx = len(pad_amount) - 2 * dims[i] - 1
|
108 |
+
pad_amount[pad_idx] = sizes[i] - x_sizes[dims[i]]
|
109 |
+
|
110 |
+
if x_sizes[dims[i]] > sizes[i]:
|
111 |
+
x = x.narrow(dims[i], 0, sizes[i])
|
112 |
+
|
113 |
+
return torch.constant_pad_nd(x, pad_amount) if must_copy else x
|
114 |
+
|
115 |
+
|
116 |
+
def _fft_c2r(
    func_name: str,
    input: TensorLikeType,
    n: Optional[int],
    dim: int,
    norm: NormType,
    forward: bool,
) -> TensorLikeType:
    """Common code for performing any complex to real FFT (irfft or hfft).

    Args:
        func_name: caller name, used only for error messages
        n: output signal length; defaults to 2 * (input.shape[dim] - 1)
        dim: dimension to transform
        norm: normalization mode (None/"forward"/"backward"/"ortho")
        forward: True for hfft semantics, False for irfft
    """
    input = _maybe_promote_tensor_fft(input, require_complex=True)
    dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
    last_dim_size = n if n is not None else 2 * (input.shape[dim] - 1)
    torch._check(
        last_dim_size >= 1,
        lambda: f"Invalid number of data points ({last_dim_size}) specified",
    )

    if n is not None:
        # The onesided complex input stores only last_dim_size // 2 + 1 entries.
        input = _resize_fft_input(input, dims=dims, sizes=(last_dim_size // 2 + 1,))

    if forward:
        # hfft is implemented as the conjugate of the inverse transform's input.
        input = torch.conj(input)

    output = prims.fft_c2r(input, dim=dims, last_dim_size=last_dim_size)
    return _apply_norm(output, norm=norm, signal_numel=last_dim_size, forward=forward)
|
141 |
+
|
142 |
+
|
143 |
+
def _fft_r2c(
    func_name: str,
    input: TensorLikeType,
    n: Optional[int],
    dim: int,
    norm: NormType,
    forward: bool,
    onesided: bool,
) -> TensorLikeType:
    """Common code for performing any real to complex FFT (rfft or ihfft).

    ``onesided`` selects whether only the non-redundant half spectrum is
    returned; ``forward=False`` gives ihfft semantics (conjugated result).
    """
    torch._check(
        not input.dtype.is_complex,
        lambda: f"{func_name} expects a floating point input tensor, but got {input.dtype}",
    )
    input = _maybe_promote_tensor_fft(input)
    dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
    dim_size = n if n is not None else input.shape[dim]
    torch._check(
        dim_size >= 1, lambda: f"Invalid number of data points ({dim_size}) specified"
    )

    if n is not None:
        input = _resize_fft_input(input, dims, (n,))

    ret = prims.fft_r2c(input, dim=dims, onesided=onesided)
    ret = _apply_norm(ret, norm, dim_size, forward)
    # The inverse transform (ihfft) is the conjugate of the forward transform.
    return ret if forward else torch.conj(ret)
|
170 |
+
|
171 |
+
|
172 |
+
def _fft_c2c(
    func_name: str,
    input: TensorLikeType,
    n: Optional[int],
    dim: int,
    norm: NormType,
    forward: bool,
) -> TensorLikeType:
    """Common code for performing any complex to complex FFT (fft or ifft).

    ``n`` optionally resizes (pad or crop) the transformed dimension first.
    """
    torch._check(
        input.dtype.is_complex,
        lambda: f"{func_name} expects a complex input tensor, but got {input.dtype}",
    )
    dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
    dim_size = n if n is not None else input.shape[dim]
    torch._check(
        dim_size >= 1, lambda: f"Invalid number of data points ({dim_size}) specified"
    )

    if n is not None:
        input = _resize_fft_input(input, dims, (n,))

    ret = prims.fft_c2c(input, dim=dims, forward=forward)
    return _apply_norm(ret, norm, dim_size, forward)
|
196 |
+
|
197 |
+
|
198 |
+
@register_decomposition(aten.fft_fft)
@out_wrapper()
def fft(
    input: TensorLikeType,
    n: Optional[int] = None,
    dim: int = -1,
    norm: NormType = None,
) -> TensorLikeType:
    """Reference for torch.fft.fft: one-dimensional discrete Fourier transform."""
    # Real input goes through a full (two-sided) real-to-complex transform.
    if input.dtype.is_complex:
        return _fft_c2c("fft", input, n, dim, norm, forward=True)
    else:
        return _fft_r2c("fft", input, n, dim, norm, forward=True, onesided=False)
|
210 |
+
|
211 |
+
|
212 |
+
@register_decomposition(aten.fft_ifft)
@out_wrapper()
def ifft(
    input: TensorLikeType,
    n: Optional[int] = None,
    dim: int = -1,
    norm: NormType = None,
) -> TensorLikeType:
    """Reference for torch.fft.ifft: one-dimensional inverse discrete Fourier transform."""
    # Real input goes through a full (two-sided) real-to-complex transform.
    if input.dtype.is_complex:
        return _fft_c2c("ifft", input, n, dim, norm, forward=False)
    else:
        return _fft_r2c("ifft", input, n, dim, norm, forward=False, onesided=False)
|
224 |
+
|
225 |
+
|
226 |
+
@register_decomposition(aten.fft_rfft)
@out_wrapper()
def rfft(
    input: TensorLikeType,
    n: Optional[int] = None,
    dim: int = -1,
    norm: NormType = None,
) -> TensorLikeType:
    """Reference for torch.fft.rfft: one-sided FFT of a real-valued input."""
    return _fft_r2c("rfft", input, n, dim, norm, forward=True, onesided=True)
|
235 |
+
|
236 |
+
|
237 |
+
@register_decomposition(aten.fft_irfft)
@out_wrapper()
def irfft(
    input: TensorLikeType,
    n: Optional[int] = None,
    dim: int = -1,
    norm: NormType = None,
) -> TensorLikeType:
    """Reference for torch.fft.irfft: inverse of rfft, producing a real output."""
    return _fft_c2r("irfft", input, n, dim, norm, forward=False)
|
246 |
+
|
247 |
+
|
248 |
+
@register_decomposition(aten.fft_hfft)
@out_wrapper()
def hfft(
    input: TensorLikeType,
    n: Optional[int] = None,
    dim: int = -1,
    norm: NormType = None,
) -> TensorLikeType:
    """Reference for torch.fft.hfft: FFT of a Hermitian-symmetric (complex) signal."""
    return _fft_c2r("hfft", input, n, dim, norm, forward=True)
|
257 |
+
|
258 |
+
|
259 |
+
@register_decomposition(aten.fft_ihfft)
@out_wrapper()
def ihfft(
    input: TensorLikeType,
    n: Optional[int] = None,
    dim: int = -1,
    norm: NormType = None,
) -> TensorLikeType:
    """Reference for torch.fft.ihfft: inverse FFT of a real signal, one-sided output."""
    return _fft_r2c("ihfft", input, n, dim, norm, forward=False, onesided=True)
|
268 |
+
|
269 |
+
|
270 |
+
class _ShapeAndDims(NamedTuple):
    """Canonicalized (shape, dims) pair produced by _canonicalize_fft_shape_and_dim_args."""

    shape: Tuple[int, ...]
    dims: Tuple[int, ...]
|
273 |
+
|
274 |
+
|
275 |
+
def _canonicalize_fft_shape_and_dim_args(
    input: TensorLikeType, shape: Optional[ShapeType], dim: Optional[DimsType]
) -> _ShapeAndDims:
    """Convert the shape and dim arguments into a canonical form where neither are optional.

    Scalars are wrapped into 1-tuples; -1 entries in ``shape`` are replaced by
    the input's size along the corresponding dim; when only ``shape`` is given,
    the dims default to the last len(shape) dimensions; when neither is given,
    all dimensions are transformed.
    """
    input_dim = input.ndim
    input_sizes = input.shape

    if dim is not None:
        if not isinstance(dim, Sequence):
            dim = (dim,)
        ret_dims = utils.canonicalize_dims(input_dim, dim, wrap_scalar=False)

        # Check dims are unique
        torch._check(
            len(set(ret_dims)) == len(ret_dims), lambda: "FFT dims must be unique"
        )

    if shape is not None:
        if not isinstance(shape, Sequence):
            shape = (shape,)

        # Has shape, might have dim
        torch._check(
            dim is None or len(dim) == len(shape),
            lambda: "When given, dim and shape arguments must have the same length",
        )
        transform_ndim = len(shape)

        torch._check(
            transform_ndim <= input_dim,
            lambda: f"Got shape with {transform_ndim} values but input tensor "
            f"only has {input_dim} dimensions.",
        )

        # If shape is given, dims defaults to the last len(shape) dimensions
        if dim is None:
            ret_dims = tuple(range(input_dim - transform_ndim, input_dim))

        # Translate any -1 values in shape to the default length
        ret_shape = tuple(
            s if s != -1 else input_sizes[d] for (s, d) in zip(shape, ret_dims)
        )
    elif dim is None:
        # No shape, no dim
        ret_dims = tuple(range(input_dim))
        ret_shape = tuple(input_sizes)
    else:
        # No shape, has dim
        ret_shape = tuple(input_sizes[d] for d in ret_dims)

    for n in ret_shape:
        torch._check(n > 0, lambda: f"Invalid number of data points ({n}) specified")

    return _ShapeAndDims(shape=ret_shape, dims=ret_dims)
|
329 |
+
|
330 |
+
|
331 |
+
def _prod(xs: Iterable[int]) -> int:
|
332 |
+
"""Compute product of a list"""
|
333 |
+
prod = 1
|
334 |
+
for x in xs:
|
335 |
+
prod *= x
|
336 |
+
return prod
|
337 |
+
|
338 |
+
|
339 |
+
def _fftn_c2c(
    function_name: str,
    input: TensorLikeType,
    shape: Tuple[int, ...],
    dim: Tuple[int, ...],
    norm: NormType,
    forward: bool,
) -> TensorLikeType:
    """Common code for n-dimensional complex to complex FFTs (fftn or ifftn).

    ``shape`` and ``dim`` must already be canonicalized (same length, valid dims).
    """
    torch._check(
        input.dtype.is_complex,
        lambda: f"{function_name} expects a complex input tensor, "
        f"but got {input.dtype}",
    )
    x = _resize_fft_input(input, dim, shape)
    output = prims.fft_c2c(x, dim=dim, forward=forward)
    # Normalization uses the total element count of the transformed signal.
    return _apply_norm(output, norm=norm, signal_numel=_prod(shape), forward=forward)
|
356 |
+
|
357 |
+
|
358 |
+
@register_decomposition(aten.fft_fftn)
@out_wrapper()
def fftn(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = None,
    norm: NormType = None,
) -> TensorLikeType:
    """Reference for torch.fft.fftn: n-dimensional discrete Fourier transform."""
    (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
    x = _maybe_promote_tensor_fft(input, require_complex=True)
    return _fftn_c2c("fftn", x, shape, dim, norm, forward=True)
|
369 |
+
|
370 |
+
|
371 |
+
@register_decomposition(aten.fft_ifftn)
@out_wrapper()
def ifftn(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = None,
    norm: NormType = None,
) -> TensorLikeType:
    """Reference for torch.fft.ifftn: n-dimensional inverse discrete Fourier transform."""
    (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
    x = _maybe_promote_tensor_fft(input, require_complex=True)
    return _fftn_c2c("ifftn", x, shape, dim, norm, forward=False)
|
382 |
+
|
383 |
+
|
384 |
+
@register_decomposition(aten.fft_rfftn)
@out_wrapper()
def rfftn(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = None,
    norm: NormType = None,
) -> TensorLikeType:
    """Reference for torch.fft.rfftn: n-dimensional one-sided FFT of a real input."""
    torch._check(
        not input.dtype.is_complex,
        lambda: f"rfftn expects a real-valued input tensor, but got {input.dtype}",
    )
    shape, dim = _canonicalize_fft_shape_and_dim_args(input, s, dim)
    input = _maybe_promote_tensor_fft(input, require_complex=False)
    input = _resize_fft_input(input, dim, shape)
    out = prims.fft_r2c(input, dim=dim, onesided=True)
    return _apply_norm(out, norm=norm, signal_numel=_prod(shape), forward=True)
|
401 |
+
|
402 |
+
|
403 |
+
@register_decomposition(aten.fft_ihfftn)
@out_wrapper()
def ihfftn(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = None,
    norm: NormType = None,
) -> TensorLikeType:
    """Reference for torch.fft.ihfftn: n-dimensional inverse FFT of a real signal."""
    torch._check(
        not input.dtype.is_complex,
        lambda: f"ihfftn expects a real-valued input tensor, but got {input.dtype}",
    )
    shape, dim = _canonicalize_fft_shape_and_dim_args(input, s, dim)
    torch._check(len(shape) > 0, lambda: "ihfftn must transform at least one axis")
    input = _maybe_promote_tensor_fft(input, require_complex=False)
    input = _resize_fft_input(input, dim, shape)

    # Real-to-complex over the last transformed dim only; the remaining dims
    # are handled below by a complex-to-complex transform.
    tmp = prims.fft_r2c(input, dim=dim[-1:], onesided=True)

    if len(dim) == 1:
        tmp = _apply_norm(tmp, norm=norm, signal_numel=shape[0], forward=False)
        return prims.conj(tmp)

    tmp = prims.conj_physical(tmp)
    tmp = prims.fft_c2c(tmp, dim=dim[:-1], forward=False)
    return _apply_norm(tmp, norm=norm, signal_numel=_prod(shape), forward=False)
|
429 |
+
|
430 |
+
|
431 |
+
class _CanonicalizeC2rReturn(NamedTuple):
    """Canonicalized arguments for n-dimensional complex-to-real transforms.

    ``shape`` is the (onesided) input shape, ``last_dim_size`` the output
    length along the final transformed dimension.
    """

    shape: Tuple[int, ...]
    dim: Tuple[int, ...]
    last_dim_size: int
|
435 |
+
|
436 |
+
|
437 |
+
def _canonicalize_fft_c2r_shape_and_dim_args(
    fname: str,
    input: TensorLikeType,
    s: Optional[ShapeType],
    dim: Optional[DimsType],
) -> _CanonicalizeC2rReturn:
    """Canonicalize shape and dim arguments for n-dimensional c2r transforms,
    as well as calculating the last_dim_size which is shape[dim[-1]] for the output"""
    (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
    torch._check(len(shape) > 0, lambda: f"{fname} must transform at least one axis")

    # Default output length along the last dim follows the onesided convention.
    if s is None or s[-1] == -1:
        last_dim_size = 2 * (input.shape[dim[-1]] - 1)
    else:
        last_dim_size = shape[-1]

    torch._check(
        last_dim_size >= 1,
        lambda: f"Invalid number of data points ({last_dim_size}) specified",
    )

    # The complex input only stores the non-redundant half spectrum.
    shape_list = list(shape)
    shape_list[-1] = last_dim_size // 2 + 1
    return _CanonicalizeC2rReturn(
        shape=tuple(shape_list), dim=dim, last_dim_size=last_dim_size
    )
|
463 |
+
|
464 |
+
|
465 |
+
@register_decomposition(aten.fft_irfftn)
@out_wrapper()
def irfftn(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = None,
    norm: NormType = None,
) -> TensorLikeType:
    """Reference for torch.fft.irfftn: inverse of rfftn, producing a real output."""
    shape, dim, last_dim_size = _canonicalize_fft_c2r_shape_and_dim_args(
        "irfftn", input, s, dim
    )
    input = _maybe_promote_tensor_fft(input, require_complex=True)
    input = _resize_fft_input(input, dim, shape)
    out = prims.fft_c2r(input, dim=dim, last_dim_size=last_dim_size)
    # Normalize by the OUTPUT signal size (includes the restored last dim).
    return _apply_norm(out, norm, _prod(out.shape[d] for d in dim), forward=False)
|
480 |
+
|
481 |
+
|
482 |
+
@register_decomposition(aten.fft_hfftn)
@out_wrapper()
def hfftn(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = None,
    norm: NormType = None,
) -> TensorLikeType:
    """Reference for torch.fft.hfftn: n-dimensional FFT of a Hermitian-symmetric signal."""
    shape, dim, last_dim_size = _canonicalize_fft_c2r_shape_and_dim_args(
        "hfftn", input, s, dim
    )
    input = _maybe_promote_tensor_fft(input, require_complex=True)
    input = _resize_fft_input(input, dim, shape)

    # c2c over all but the last dim (if any), then conjugate and finish with a
    # c2r transform over the last dim; each stage is normalized separately.
    tmp = prims.fft_c2c(input, dim=dim[:-1], forward=True) if len(dim) > 1 else input
    tmp = _apply_norm(tmp, norm, _prod(shape[:-1]), forward=True)
    tmp = prims.conj_physical(tmp)
    out = prims.fft_c2r(tmp, dim=dim[-1:], last_dim_size=last_dim_size)
    return _apply_norm(out, norm, last_dim_size, forward=True)
|
501 |
+
|
502 |
+
|
503 |
+
@register_decomposition(aten.fft_fft2)
@out_wrapper()
def fft2(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = (-2, -1),
    norm: NormType = None,
) -> TensorLikeType:
    """Reference for torch.fft.fft2: 2-D FFT, delegating to fftn over ``dim``."""
    return torch.fft.fftn(input, s=s, dim=dim, norm=norm)
|
512 |
+
|
513 |
+
|
514 |
+
@register_decomposition(aten.fft_ifft2)
@out_wrapper()
def ifft2(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = (-2, -1),
    norm: NormType = None,
) -> TensorLikeType:
    """Reference for torch.fft.ifft2: 2-D inverse FFT, delegating to ifftn over ``dim``."""
    return torch.fft.ifftn(input, s=s, dim=dim, norm=norm)
|
523 |
+
|
524 |
+
|
525 |
+
@register_decomposition(aten.fft_rfft2)
@out_wrapper()
def rfft2(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = (-2, -1),
    norm: NormType = None,
) -> TensorLikeType:
    """Reference for torch.fft.rfft2: 2-D real FFT, delegating to rfftn over ``dim``."""
    return torch.fft.rfftn(input, s=s, dim=dim, norm=norm)
|
534 |
+
|
535 |
+
|
536 |
+
@register_decomposition(aten.fft_irfft2)
@out_wrapper()
def irfft2(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = (-2, -1),
    norm: NormType = None,
) -> TensorLikeType:
    """Reference for torch.fft.irfft2: 2-D inverse real FFT, delegating to irfftn."""
    return torch.fft.irfftn(input, s=s, dim=dim, norm=norm)
|
545 |
+
|
546 |
+
|
547 |
+
@register_decomposition(aten.fft_hfft2)
@out_wrapper()
def hfft2(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = (-2, -1),
    norm: NormType = None,
) -> TensorLikeType:
    """Reference for torch.fft.hfft2: 2-D Hermitian FFT, delegating to hfftn."""
    return torch.fft.hfftn(input, s=s, dim=dim, norm=norm)
|
556 |
+
|
557 |
+
|
558 |
+
@register_decomposition(aten.fft_ihfft2)
@out_wrapper()
def ihfft2(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = (-2, -1),
    norm: NormType = None,
) -> TensorLikeType:
    """Reference for torch.fft.ihfft2: 2-D inverse Hermitian FFT, delegating to ihfftn."""
    return torch.fft.ihfftn(input, s=s, dim=dim, norm=norm)
|
567 |
+
|
568 |
+
|
569 |
+
def _default_alldims(dim: Optional[DimsType], x: TensorLikeType) -> List[int]:
|
570 |
+
"""Convert Optional[DimsType] to a simple list, defaulting to all dimensions"""
|
571 |
+
if dim is None:
|
572 |
+
return list(range(x.ndim))
|
573 |
+
elif not isinstance(dim, Sequence):
|
574 |
+
return [dim]
|
575 |
+
else:
|
576 |
+
return list(dim)
|
577 |
+
|
578 |
+
|
579 |
+
@register_decomposition(aten.fft_fftshift)
def fftshift(input: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType:
    """Reference for torch.fft.fftshift: roll the zero-frequency term to the center."""
    dims = _default_alldims(dim, input)
    # Shift by floor(n / 2) per dim; ifftshift uses ceil(n / 2) to undo this.
    shift = [input.shape[d] // 2 for d in dims]
    return torch.roll(input, shift, dims)
|
584 |
+
|
585 |
+
|
586 |
+
@register_decomposition(aten.fft_ifftshift)
def ifftshift(input: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType:
    """Reference for torch.fft.ifftshift: inverse of fftshift."""
    dims = _default_alldims(dim, input)
    # ceil(n / 2) per dim, undoing fftshift's floor(n / 2) for odd sizes too.
    shift = [(input.shape[d] + 1) // 2 for d in dims]
    return torch.roll(input, shift, dims)
|
env-llmeval/lib/python3.10/site-packages/torch/_refs/linalg/__init__.py
ADDED
@@ -0,0 +1,276 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from functools import partial
|
2 |
+
|
3 |
+
from typing import List, Optional, Tuple, Union
|
4 |
+
|
5 |
+
import torch
|
6 |
+
|
7 |
+
import torch._prims as prims
|
8 |
+
|
9 |
+
import torch._prims_common as utils
|
10 |
+
import torch._refs as refs
|
11 |
+
import torch._refs.linalg as linalg
|
12 |
+
from torch import Tensor
|
13 |
+
from torch._prims_common import (
|
14 |
+
check_fp_or_complex,
|
15 |
+
check_is_matrix,
|
16 |
+
Dim,
|
17 |
+
DimsType,
|
18 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND,
|
19 |
+
NumberType,
|
20 |
+
TensorLikeType,
|
21 |
+
)
|
22 |
+
from torch._prims_common.wrappers import (
|
23 |
+
_maybe_convert_to_dtype,
|
24 |
+
elementwise_type_promotion_wrapper,
|
25 |
+
out_wrapper,
|
26 |
+
)
|
27 |
+
|
28 |
+
|
29 |
+
__all__ = ["diagonal", "matrix_norm", "norm", "svd", "svdvals", "vector_norm", "vecdot"]
|
30 |
+
|
31 |
+
|
32 |
+
def _check_norm_dtype(dtype: Optional[torch.dtype], x_dtype: torch.dtype, fn_name: str):
|
33 |
+
"""
|
34 |
+
Checks related to the dtype kwarg in `linalg.*norm` functions
|
35 |
+
"""
|
36 |
+
if dtype is not None:
|
37 |
+
torch._check(
|
38 |
+
utils.is_float_dtype(dtype) or utils.is_complex_dtype(dtype),
|
39 |
+
lambda: f"{fn_name}: dtype should be floating point or complex. Got {dtype}",
|
40 |
+
)
|
41 |
+
torch._check(
|
42 |
+
utils.is_complex_dtype(dtype) == utils.is_complex_dtype(x_dtype),
|
43 |
+
lambda: "{fn_name}: dtype should be {d} for {d} inputs. Got {dtype}".format(
|
44 |
+
fn_name=fn_name,
|
45 |
+
d="complex" if utils.is_complex_dtype(x_dtype) else "real",
|
46 |
+
dtype=dtype,
|
47 |
+
),
|
48 |
+
)
|
49 |
+
torch._check(
|
50 |
+
utils.get_higher_dtype(dtype, x_dtype) == dtype,
|
51 |
+
lambda: f"{fn_name}: the dtype of the input ({x_dtype}) should be convertible "
|
52 |
+
"without narrowing to the specified dtype ({dtype})",
|
53 |
+
)
|
54 |
+
|
55 |
+
|
56 |
+
# Utilities should come BEFORE this import
|
57 |
+
from torch._decomp import register_decomposition
|
58 |
+
|
59 |
+
|
60 |
+
def diagonal(
    input: TensorLikeType,
    *,
    offset: int = 0,
    dim1: int = -2,
    dim2: int = -1,
) -> TensorLikeType:
    """Reference for torch.linalg.diagonal: torch.diagonal with keyword-only arguments."""
    return torch.diagonal(input, offset, dim1, dim2)
|
68 |
+
|
69 |
+
|
70 |
+
@register_decomposition(torch._ops.ops.aten.linalg_vector_norm)
@out_wrapper(exact_dtype=True)
def vector_norm(
    x: TensorLikeType,
    ord: float = 2.0,
    dim: Optional[DimsType] = None,
    keepdim: bool = False,
    *,
    dtype: Optional[torch.dtype] = None,
) -> Tensor:
    """Reference for torch.linalg.vector_norm.

    Computes the ``ord``-norm of ``x`` over ``dim`` (all dims when None).
    ``dtype``, when given, selects the computation/result dtype and must be
    compatible with ``x.dtype`` (see _check_norm_dtype).
    """
    # Checks
    check_fp_or_complex(x.dtype, "linalg.vector_norm")

    if isinstance(dim, Dim):
        dim = [dim]  # type: ignore[assignment]

    # Norms without an identity (max / negative powers) are undefined on empty
    # reductions, so reject empty tensors / empty dims for those orders.
    if x.numel() == 0 and (ord < 0.0 or ord == float("inf")):
        torch._check(
            dim is not None and len(dim) != 0,
            lambda: f"linalg.vector_norm cannot compute the {ord} norm on an empty tensor "
            "because the operation does not have an identity",
        )
        shape = x.shape
        assert dim is not None  # mypy does not seem to be able to see through check?
        for d in dim:
            torch._check(
                shape[d] != 0,
                lambda: f"linalg.vector_norm cannot compute the {ord} norm on the "
                f"dimension {d} because this dimension is empty and the "
                "operation does not have an identity",
            )
    _check_norm_dtype(dtype, x.dtype, "linalg.vector_norm")

    computation_dtype, result_dtype = utils.reduction_dtypes(
        x, utils.REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT, dtype
    )

    to_result_dtype = partial(_maybe_convert_to_dtype, dtype=result_dtype)

    # Implementation
    if ord == 0.0:
        # 0-"norm": count of non-zero entries.
        return torch.sum(torch.ne(x, 0.0), dim=dim, keepdim=keepdim, dtype=result_dtype)
    elif ord == float("inf"):
        return to_result_dtype(torch.amax(torch.abs(x), dim=dim, keepdim=keepdim))  # type: ignore[return-value,arg-type]
    elif ord == float("-inf"):
        return to_result_dtype(torch.amin(torch.abs(x), dim=dim, keepdim=keepdim))  # type: ignore[return-value,arg-type]
    else:
        # From here on the computation dtype is important as the reduction is non-trivial
        x = _maybe_convert_to_dtype(x, computation_dtype)  # type: ignore[assignment]
        reduce_sum = partial(torch.sum, dim=dim, keepdim=keepdim)

        # abs() can be skipped for even integer orders on real floats, where
        # pow() already yields a non-negative value.
        if not (ord % 2.0 == 0.0 and utils.is_float_dtype(x.dtype)):
            x = torch.abs(x)
        return to_result_dtype(torch.pow(reduce_sum(torch.pow(x, ord)), 1.0 / ord))  # type: ignore[return-value]
|
124 |
+
|
125 |
+
|
126 |
+
def _backshift_permutation(dim0, dim1, ndim):
|
127 |
+
# Auxiliary function for matrix_norm
|
128 |
+
# Computes the permutation that moves the two given dimensions to the back
|
129 |
+
ret = [i for i in range(ndim) if i != dim0 and i != dim1]
|
130 |
+
ret.extend((dim0, dim1))
|
131 |
+
return ret
|
132 |
+
|
133 |
+
|
134 |
+
def _inverse_permutation(perm):
|
135 |
+
# Given a permutation, returns its inverse. It's equivalent to argsort on an array
|
136 |
+
return [i for i, j in sorted(enumerate(perm), key=lambda i_j: i_j[1])]
|
137 |
+
|
138 |
+
|
139 |
+
# CompositeImplicitAutograd
@out_wrapper(exact_dtype=True)
def matrix_norm(
    A: TensorLikeType,
    ord: Union[float, str] = "fro",
    dim: DimsType = (-2, -1),
    keepdim: bool = False,
    *,
    dtype: Optional[torch.dtype] = None,
) -> TensorLikeType:
    """
    Reference implementation of torch.linalg.matrix_norm.

    Supports the string orders "fro" (Frobenius) and "nuc" (nuclear) and the
    operator orders +-1, +-2, +-inf computed over the two dimensions in `dim`.
    """
    # shape
    check_is_matrix(A, "linalg.matrix_norm")
    # dim
    dim = utils.canonicalize_dims(A.ndim, dim)
    if isinstance(dim, Dim):
        dim = (dim,)  # type: ignore[assignment]
    # BUGFIX: these error messages were plain strings, so "{dim}" was printed
    # literally; they must be f-strings for the values to be interpolated.
    torch._check(
        len(dim) == 2, lambda: f"linalg.matrix_norm: dim must be a 2-tuple. Got {dim}"
    )
    torch._check(
        dim[0] != dim[1],
        lambda: f"linalg.matrix_norm: dims must be different. Got ({dim[0]}, {dim[1]})",
    )
    # dtype arg
    _check_norm_dtype(dtype, A.dtype, "linalg.matrix_norm")

    if isinstance(ord, str):
        # ord
        torch._check(
            ord in ("fro", "nuc"),
            lambda: f"linalg.matrix_norm: Order {ord} not supported.",
        )
        # dtype
        check_fp_or_complex(
            A.dtype, "linalg.matrix_norm", allow_low_precision_dtypes=ord != "nuc"
        )

        if ord == "fro":
            # Frobenius norm is just the 2-norm of the flattened matrix dims.
            return vector_norm(A, 2, dim, keepdim, dtype=dtype)
        else:  # ord == "nuc"
            if dtype is not None:
                A = _maybe_convert_to_dtype(A, dtype)  # type: ignore[assignment]
            # Move the matrix dims to the back so svdvals reduces over them.
            perm = _backshift_permutation(dim[0], dim[1], A.ndim)
            result = torch.sum(svdvals(prims.transpose(A, perm)), -1, keepdim)
            if keepdim:
                inv_perm = _inverse_permutation(perm)
                result = prims.transpose(torch.unsqueeze(result, -1), inv_perm)
            return result
    else:
        # ord
        abs_ord = abs(ord)
        torch._check(
            abs_ord in (2, 1, float("inf")),
            lambda: f"linalg.matrix_norm: Order {ord} not supported.",
        )
        # dtype
        check_fp_or_complex(
            A.dtype, "linalg.matrix_norm", allow_low_precision_dtypes=ord != 2
        )

        # Positive orders take the max singular value / column / row sum,
        # negative orders take the min.
        max_min = partial(torch.amax if ord > 0.0 else torch.amin, keepdim=keepdim)

        if abs_ord == 2.0:
            if dtype is not None:
                A = _maybe_convert_to_dtype(A, dtype)  # type: ignore[assignment]
            perm = _backshift_permutation(dim[0], dim[1], A.ndim)
            result = max_min(svdvals(prims.transpose(A, perm)), dim=-1)
            if keepdim:
                inv_perm = _inverse_permutation(perm)
                result = prims.transpose(torch.unsqueeze(result, -1), inv_perm)
            return result
        else:  # 1, -1, inf, -inf
            dim0, dim1 = dim
            if abs_ord == float("inf"):
                # inf-norm reduces over rows first, so swap the two dims.
                dim0, dim1 = dim1, dim0
            if not keepdim and (dim0 < dim1):
                # vector_norm removed dim0, shifting dim1 left by one.
                dim1 -= 1
            return max_min(
                vector_norm(A, 1.0, dim=dim0, keepdim=keepdim, dtype=dtype), dim1
            )
|
219 |
+
|
220 |
+
|
221 |
+
# CompositeImplicitAutograd
@out_wrapper(exact_dtype=True)
def norm(
    A: TensorLikeType,
    ord: Optional[Union[float, str]] = None,
    dim: Optional[DimsType] = None,
    keepdim: bool = False,
    *,
    dtype: Optional[torch.dtype] = None,
) -> TensorLikeType:
    """
    Reference implementation of torch.linalg.norm.

    Dispatches to matrix_norm when a matrix norm is requested (dim of length
    2, or no dim on a 2D input with an explicit ord) and to vector_norm
    otherwise.
    """
    if dim is not None:
        if isinstance(dim, Dim):
            dim = (dim,)  # type: ignore[assignment]
        # BUGFIX: these error messages were plain strings, so "{dim}" /
        # "{A.ndim}" were printed literally; they must be f-strings.
        torch._check(
            len(dim) in (1, 2),
            lambda: f"linalg.norm: If dim is specified, it must be of length 1 or 2. Got {dim}",
        )
    elif ord is not None:
        torch._check(
            A.ndim in (1, 2),
            lambda: f"linalg.norm: If dim is not specified but ord is, the input must be 1D or 2D. Got {A.ndim}D",
        )

    if ord is not None and (
        (dim is not None and len(dim) == 2) or (dim is None and A.ndim == 2)
    ):
        if dim is None:
            dim = (0, 1)
        return matrix_norm(A, ord, dim, keepdim, dtype=dtype)
    else:
        if ord is None:
            # Default vector norm is the Euclidean (2-) norm.
            ord = 2.0
        return vector_norm(A, ord, dim, keepdim, dtype=dtype)
|
254 |
+
|
255 |
+
|
256 |
+
# CompositeImplicitAutograd
@out_wrapper("U", "S", "Vh", exact_dtype=True)
def svd(A: TensorLikeType, full_matrices: bool = True) -> Tuple[Tensor, Tensor, Tensor]:
    # Reference for torch.linalg.svd: thin wrapper delegating the (U, S, Vh)
    # decomposition directly to the svd prim.
    return prims.svd(A, full_matrices=full_matrices)
|
260 |
+
|
261 |
+
|
262 |
+
# CompositeImplicitAutograd
@out_wrapper(exact_dtype=True)
def svdvals(A: TensorLikeType) -> Tensor:
    # Singular values only: take the S component (index 1) of the reduced SVD.
    return svd(A, full_matrices=False)[1]
|
266 |
+
|
267 |
+
|
268 |
+
# CompositeImplicitAutograd
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("x", "y"),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def vecdot(x: Tensor, y: Tensor, dim: int = -1) -> Tensor:
    """Reference implementation of torch.linalg.vecdot: conjugated dot
    product of x and y along dimension `dim`."""
    check_fp_or_complex(x.dtype, "linalg.vecdot")
    products = torch.conj(x) * y
    return torch.sum(products, dim=dim)
|
env-llmeval/lib/python3.10/site-packages/torch/_refs/linalg/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (8.06 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/__init__.py
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
from typing import List
|
2 |
+
|
3 |
+
__all__: List[str] = []
|
env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (270 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/functional/__init__.py
ADDED
@@ -0,0 +1,1174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import math
|
2 |
+
from functools import wraps
|
3 |
+
from typing import Callable, Optional, Union
|
4 |
+
|
5 |
+
import torch
|
6 |
+
import torch._prims as prims
|
7 |
+
import torch._prims_common as utils
|
8 |
+
import torch._refs as refs
|
9 |
+
from torch._decomp import register_decomposition
|
10 |
+
from torch._prims_common import (
|
11 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND,
|
12 |
+
NumberType,
|
13 |
+
ShapeType,
|
14 |
+
TensorLike,
|
15 |
+
TensorLikeType,
|
16 |
+
)
|
17 |
+
from torch._prims_common.wrappers import (
|
18 |
+
elementwise_type_promotion_wrapper,
|
19 |
+
elementwise_unary_scalar_wrapper,
|
20 |
+
out_wrapper,
|
21 |
+
)
|
22 |
+
from torch._refs import _make_inplace
|
23 |
+
|
24 |
+
# Public surface of torch._refs.nn.functional: reference implementations of
# the matching torch.nn.functional operators, plus "_"-suffixed in-place
# variants where provided.
__all__ = [
    "alpha_dropout",
    "celu",
    "celu_",
    "dropout",
    "elu",
    "elu_",
    "gelu",
    "glu",
    "group_norm",
    "hardshrink",
    "hardtanh",
    "hinge_embedding_loss",
    "huber_loss",
    "l1_loss",
    "layer_norm",
    "leaky_relu",
    "log_softmax",
    "margin_ranking_loss",
    "mish",
    "mish_",
    "mse_loss",
    "nll_loss",
    "pairwise_distance",
    "pdist",
    "poisson_nll_loss",
    "prelu",
    "relu",
    "relu6",
    "selu",
    "selu_",
    "smooth_l1_loss",
    "softmax",
    "softmin",
    "softplus",
    "softshrink",
    "tanhshrink",
    "threshold",
    "threshold_",
    "triplet_margin_loss",
]

# Short aliases used throughout this module.
Tensor = torch.Tensor
aten = torch._ops.ops.aten
DispatchKey = torch._C.DispatchKey  # type: ignore[attr-defined]
|
69 |
+
|
70 |
+
|
71 |
+
def _dropout_helper(
    self: TensorLikeType,
    val: float,
) -> TensorLikeType:
    """
    Helper function for all dropout-type operators. During training,
    some of the elements of the input tensor are randomly masked.

    Returns the masked tensor of the boolean values.

    """
    # Sample uniform noise in [0, 1) and keep the positions that fall below
    # `val` (i.e. each element is kept with probability `val`).
    uniform_noise = refs._uniform_helper(
        self.shape, low=0.0, high=1.0, dtype=torch.float32, device=self.device
    )
    return uniform_noise < val
|
89 |
+
|
90 |
+
|
91 |
+
@register_decomposition(aten.alpha_dropout)
def alpha_dropout(
    self: TensorLikeType, p: float = 0.5, training: bool = False, inplace: bool = False
) -> TensorLikeType:
    # Reference implementation of torch.nn.functional.alpha_dropout.
    # Unlike plain dropout, dropped elements are mapped to a negative
    # saturation value and an affine correction is applied so SELU
    # activations keep their self-normalizing statistics.
    if inplace:
        raise NotImplementedError

    # Evaluation mode: identity.
    if not training:
        return self

    torch._check(
        p <= 1 and p >= 0,
        lambda: f"dropout probability has to be between 0 and 1, but got, {p}",
    )

    # Degenerate probabilities: drop everything / keep everything.
    if p == 1:
        return torch.zeros_like(self)

    if p == 0:
        return self

    # Boolean keep-mask; True with probability (1 - p).
    dropout_mask = _dropout_helper(self, 1 - p)

    # From paper: Self-Normalizing Neural Networks (https://arxiv.org/pdf/1706.02515.pdf)
    # alpha = - SELU.alpha * SELU.scale, here
    # SELU.alpha = 1.6732632423543772848170429916717 and
    # SELU.scale = 1.0507009873554804934193349852946
    alpha = -1.7580993408473766

    # Affine correction (scale `a`, shift `b`) restoring zero mean / unit
    # variance after masking (eq. from the SNN paper).
    a = 1.0 / math.sqrt((alpha * alpha * p + 1) * (1 - p))
    b = torch.logical_not(dropout_mask)
    b = b * (alpha * a) + alpha * a * p
    dropout_mask = a * dropout_mask

    return self * dropout_mask + b
|
126 |
+
|
127 |
+
|
128 |
+
def _inplace_wrapper(fn):
|
129 |
+
"""
|
130 |
+
Given a nn.functional non-linearity, implements its `inplace: bool` argument
|
131 |
+
"""
|
132 |
+
|
133 |
+
# nb. We use the name of the first argument used in the unary references
|
134 |
+
@wraps(fn)
|
135 |
+
def _fn(a, *args, inplace=False, **kwargs):
|
136 |
+
if inplace:
|
137 |
+
torch._check(
|
138 |
+
"out" not in kwargs,
|
139 |
+
lambda: "Cannot set inplace=True and pass out= at the same time",
|
140 |
+
)
|
141 |
+
return fn(a, *args, inplace=False, out=a, **kwargs)
|
142 |
+
else:
|
143 |
+
return fn(a, *args, inplace=False, **kwargs)
|
144 |
+
|
145 |
+
return _fn
|
146 |
+
|
147 |
+
|
148 |
+
# celu is implemented specially because it has an alpha argument
# celu is very similar to elu
@register_decomposition(aten.celu)
@_inplace_wrapper
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def celu(
    a: TensorLikeType, alpha: Optional[NumberType] = None, inplace: bool = False
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.celu
    """

    if inplace:
        raise NotImplementedError

    # Negative branch: alpha * (exp(a / alpha) - 1), defaulting alpha to 1.
    negative_branch: TensorLikeType
    if alpha is None:
        negative_branch = torch.expm1(a)
    else:
        python_type = utils.dtype_to_type(a.dtype)
        if not utils.is_weakly_lesser_type(type(alpha), python_type):
            msg = f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!"
            raise ValueError(msg)
        negative_branch = alpha * torch.expm1(torch.true_divide(a, alpha))  # type: ignore[arg-type]

    return torch.where(a > 0, a, negative_branch)
|
178 |
+
|
179 |
+
|
180 |
+
@_inplace_wrapper
@out_wrapper()
def dropout(
    a: TensorLikeType, p: float = 0.5, training: bool = True, inplace: bool = False
) -> TensorLikeType:
    """Reference implementation of torch.nn.functional.dropout: zero each
    element with probability `p` during training and rescale the rest by
    1 / (1 - p)."""
    if inplace:
        raise NotImplementedError

    # Evaluation mode: identity.
    if not training:
        return a

    torch._check(
        0 <= p <= 1,
        lambda: f"dropout probability has to be between 0 and 1, but got, {p}",
    )

    # Degenerate probabilities: drop everything / keep everything.
    if p == 1:
        return torch.zeros_like(a)
    if p == 0:
        return a

    keep_mask = _dropout_helper(a, 1 - p)
    return a * keep_mask * (1 / (1 - p))
|
206 |
+
|
207 |
+
|
208 |
+
@register_decomposition(aten.elu)
@_inplace_wrapper
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def elu(
    a: TensorLikeType,
    alpha: NumberType = 1.0,
    scale: NumberType = 1.0,
    input_scale: NumberType = 1.0,
    inplace: bool = False,
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.elu

    Computes scale * a for positive inputs and
    (alpha * scale) * expm1(a * input_scale) otherwise.
    """
    if inplace:
        raise NotImplementedError

    # nb. This should be factored out into a can_cast aux function.
    # The three scalar arguments all need the same safe-cast check, so run
    # them through one data-driven loop (same check order as before).
    python_type = utils.dtype_to_type(a.dtype)
    for name, value in (
        ("input_scale", input_scale),
        ("scale", scale),
        ("alpha", alpha),
    ):
        # Bind name/value as defaults so the lazy lambda doesn't see the
        # loop variables' final values (late-binding closure pitfall).
        torch._check(
            utils.is_weakly_lesser_type(type(value), python_type),
            lambda name=name, value=value: f"{name} argument of type {type(value)} cannot be safely cast to type {python_type}!",
        )

    return torch.where(a > 0, scale * a, (alpha * scale) * torch.expm1(a * input_scale))
|
244 |
+
|
245 |
+
|
246 |
+
@register_decomposition(aten.relu)
@_inplace_wrapper
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def relu(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.relu
    """

    if inplace:
        raise NotImplementedError

    # Replace every non-positive element with 0. The comparison stays as
    # torch.le(a, 0) so NaN inputs propagate exactly as in the original.
    nonpositive = torch.le(a, 0)
    return torch.where(nonpositive, 0, a)
|
262 |
+
|
263 |
+
|
264 |
+
def group_norm(
|
265 |
+
input: Tensor,
|
266 |
+
num_groups: int,
|
267 |
+
weight: Optional[Tensor] = None,
|
268 |
+
bias: Optional[Tensor] = None,
|
269 |
+
eps: float = 1e-5,
|
270 |
+
) -> Tensor:
|
271 |
+
"""
|
272 |
+
Reference implementation of :func:`torch.nn.functional.group_norm`.
|
273 |
+
"""
|
274 |
+
torch._check(
|
275 |
+
input.ndim >= 2,
|
276 |
+
lambda: f"Expected at least 2 dimensions for input tensor but received {input.ndim}",
|
277 |
+
)
|
278 |
+
|
279 |
+
batch_size = input.shape[0]
|
280 |
+
num_channels = input.shape[1]
|
281 |
+
torch._check(
|
282 |
+
num_channels % num_groups == 0,
|
283 |
+
lambda: "Expected number of channels in input to be divisible by num_groups, "
|
284 |
+
+ f"but got input of shape {input.shape} and num_groups = {num_groups}",
|
285 |
+
)
|
286 |
+
|
287 |
+
# input shape is (N, C, *), so we flatten all inner dimensions except (N, C)
|
288 |
+
flattened_inner_size = 1
|
289 |
+
for dim_length in input.shape[2:]:
|
290 |
+
flattened_inner_size *= dim_length
|
291 |
+
|
292 |
+
return torch.native_group_norm(
|
293 |
+
input,
|
294 |
+
weight,
|
295 |
+
bias,
|
296 |
+
batch_size,
|
297 |
+
num_channels,
|
298 |
+
flattened_inner_size,
|
299 |
+
num_groups,
|
300 |
+
eps,
|
301 |
+
)[0]
|
302 |
+
|
303 |
+
|
304 |
+
def layer_norm(
    input: Tensor,
    normalized_shape: ShapeType,
    weight: Optional[Tensor] = None,
    bias: Optional[Tensor] = None,
    eps: float = 1e-5,
) -> Tensor:
    """
    Reference implementation of :func:`torch.nn.functional.layer_norm`.

    Delegates to ``torch.native_layer_norm`` and keeps only the normalized
    output, discarding the saved mean/rstd used for the backward pass.
    """
    normalized, _mean, _rstd = torch.native_layer_norm(
        input, normalized_shape, weight, bias, eps
    )
    return normalized
|
315 |
+
|
316 |
+
|
317 |
+
@register_decomposition(aten.leaky_relu)
@_inplace_wrapper
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def leaky_relu(
    a: TensorLikeType, negative_slope: float = 0.01, inplace: bool = False
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.leaky_relu
    """

    if inplace:
        raise NotImplementedError

    # Reject slopes that cannot be represented in the input's dtype.
    python_type = utils.dtype_to_type(a.dtype)
    if not utils.is_weakly_lesser_type(type(negative_slope), python_type):
        raise ValueError(
            f"negative_slope argument of type {type(negative_slope)} cannot be safely cast to type {python_type}!"
        )
    scaled = torch.mul(a, negative_slope)
    return torch.where(torch.gt(a, 0), a, scaled)
|
339 |
+
|
340 |
+
|
341 |
+
@register_decomposition(aten.mish)
@_inplace_wrapper
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def mish(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.mish
    """

    if inplace:
        raise NotImplementedError
    # mish(x) = x * tanh(softplus(x))
    softplus_a = torch.nn.functional.softplus(a)
    return a * torch.tanh(softplus_a)
|
356 |
+
|
357 |
+
|
358 |
+
@register_decomposition(aten.selu)
@_inplace_wrapper
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def selu(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.selu
    """
    if inplace:
        raise NotImplementedError

    # Fixed constants from "Self-Normalizing Neural Networks".
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946

    # selu(x) = scale * (x if x > 0 else alpha * (exp(x) - 1))
    negative_branch = alpha * torch.expm1(a)
    return scale * torch.where(a > 0, a, negative_branch)
|
378 |
+
|
379 |
+
|
380 |
+
# Forwarding alias: the functional variant doesn't support the out kwarg
# CompositeImplicitAutograd - don't register decomp
def softmax(
    a: TensorLikeType,
    dim: Optional[int] = None,
    _stacklevel: int = 3,  # for compat when using TorchRefsMode(strict=True)
    dtype: Optional[torch.dtype] = None,
) -> TensorLikeType:
    """Reference alias for torch.nn.functional.softmax (requires explicit dim)."""
    # The error is for compat with regular PyTorch, which has this behavior
    # deprecated. For PrimTorch, it's fine to drop support for deprecated
    # behavior because it requires explicit opt in. This error is to inform
    # users how to update their calls.
    torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X")
    return torch.softmax(a=a, dim=dim, dtype=dtype)  # type: ignore[call-overload]
|
394 |
+
|
395 |
+
|
396 |
+
# CompositeImplicitAutograd - don't register decomp
def softmin(
    a: TensorLikeType,
    dim: Optional[int] = None,
    _stacklevel: int = 3,  # for compat when using TorchRefsMode(strict=True)
    dtype: Optional[torch.dtype] = None,
) -> TensorLikeType:
    """Reference alias for torch.nn.functional.softmin: softmax of the negated input."""
    # The error is for compat with regular PyTorch, which has this behavior
    # deprecated. For PrimTorch, it's fine to drop support for deprecated
    # behavior because it requires explicit opt in. This error is to inform
    # users how to update their calls.
    torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X")
    return torch.softmax(a=-a, dim=dim, dtype=dtype)  # type: ignore[call-overload]
|
409 |
+
|
410 |
+
|
411 |
+
# softplus is implemented specially because it has beta and threshold arguments
@register_decomposition(aten.softplus)
@_inplace_wrapper
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def softplus(
    a: TensorLikeType,
    beta: Optional[NumberType] = None,
    threshold: NumberType = 20,
    inplace: bool = False,
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.softplus
    """

    if inplace:
        raise NotImplementedError

    # softplus(x) = log(1 + exp(beta * x)) / beta, with beta defaulting to 1.
    rhs: TensorLikeType
    if beta is None:
        scaled_input = a
        rhs = torch.log1p(torch.exp(scaled_input))
    else:
        python_type = utils.dtype_to_type(a.dtype)
        if not utils.is_weakly_lesser_type(type(beta), python_type):
            raise ValueError(
                f"beta argument of type {type(beta)} cannot be safely cast to type {python_type}!"
            )
        scaled_input = a * beta
        rhs = torch.true_divide(torch.log1p(torch.exp(scaled_input)), beta)  # type: ignore[arg-type]

    # For numerical stability, fall back to the identity above the threshold.
    return torch.where(scaled_input > threshold, a, rhs)
|
446 |
+
|
447 |
+
|
448 |
+
@aten.hardshrink.default.py_impl(DispatchKey.Autograd)
@register_decomposition(aten.hardshrink)
@out_wrapper()
def hardshrink(a: TensorLikeType, lambd: float = 0.5):
    """Reference implementation of torch.nn.functional.hardshrink."""
    # hardshrink(x) = x when |x| > lambd, and 0 inside the [-lambd, lambd] band.
    within_band = torch.abs(a) <= lambd
    return torch.where(within_band, 0, a)
|
457 |
+
|
458 |
+
|
459 |
+
@aten.softshrink.default.py_impl(DispatchKey.Autograd)
@register_decomposition(aten.softshrink)
@out_wrapper()
def softshrink(a: TensorLikeType, lambd: float = 0.5):
    """Reference implementation of torch.nn.functional.softshrink."""
    # softshrink(x) = x - sign(x) * lambd when |x| > lambd, 0 otherwise.
    torch._check(
        lambd >= 0,
        lambda: f"lambda must be greater or equal to 0, but found to be {lambd}",
    )
    # We implement this in one torch.where to generate better code in the backward
    # see https://github.com/pytorch/pytorch/pull/107052#discussion_r1293748211
    shrunk_towards_zero = a - torch.sign(a) * lambd
    return torch.where(torch.abs(a) > lambd, shrunk_towards_zero, 0)
|
474 |
+
|
475 |
+
|
476 |
+
# Losses
|
477 |
+
def _reduction_int_to_str(reduction: int) -> str:
|
478 |
+
from torch._decomp.decompositions import Reduction
|
479 |
+
|
480 |
+
if reduction == Reduction.NONE.value:
|
481 |
+
return "none"
|
482 |
+
elif reduction == Reduction.MEAN.value:
|
483 |
+
return "mean"
|
484 |
+
elif reduction == Reduction.SUM.value:
|
485 |
+
return "sum"
|
486 |
+
else:
|
487 |
+
raise ValueError(f"{reduction} is not a valid value for reduction")
|
488 |
+
|
489 |
+
|
490 |
+
def _apply_loss_reduction(loss: TensorLikeType, reduction: str) -> TensorLikeType:
|
491 |
+
if reduction == "sum":
|
492 |
+
return torch.sum(loss)
|
493 |
+
elif reduction == "mean":
|
494 |
+
return torch.mean(loss)
|
495 |
+
else: # reduction == "none"
|
496 |
+
return loss
|
497 |
+
|
498 |
+
|
499 |
+
def _check_reduction_value(reduction: str):
|
500 |
+
if reduction not in ("mean", "sum", "none"):
|
501 |
+
raise ValueError(f"{reduction} is not a valid value for reduction")
|
502 |
+
|
503 |
+
|
504 |
+
# This helper function maps depreciated arguments, "size_average" and "reduce"
|
505 |
+
# to their corresponding "reduction" string argument
|
506 |
+
def _get_string_reduction_arg(
|
507 |
+
*, size_average: Optional[bool], reduce: Optional[bool]
|
508 |
+
) -> str:
|
509 |
+
if size_average is None:
|
510 |
+
size_average = True
|
511 |
+
if reduce is None:
|
512 |
+
reduce = True
|
513 |
+
if size_average and reduce:
|
514 |
+
ret = "mean"
|
515 |
+
elif reduce:
|
516 |
+
ret = "sum"
|
517 |
+
else:
|
518 |
+
ret = "none"
|
519 |
+
return ret
|
520 |
+
|
521 |
+
|
522 |
+
# CompositeImplicitAutograd - don't register decomp
@elementwise_type_promotion_wrapper(
    type_promoting_args=("input", "target"),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT,
)
def l1_loss(
    input: TensorLikeType,
    target: TensorLikeType,
    size_average: Optional[bool] = None,
    reduce: Optional[bool] = None,
    reduction: str = "mean",
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.l1_loss
    """
    deprecated_args_used = size_average is not None or reduce is not None
    if deprecated_args_used:
        # TODO: Raise exception instead of converting value. This is only for
        # primTorch since it can drop support for deprecated arguments.
        # msg = "size_average and reduce args are deprecated, please use reduction argument."
        reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)
    _check_reduction_value(reduction)
    absolute_error = torch.abs(input - target)
    return _apply_loss_reduction(absolute_error, reduction)
|
545 |
+
|
546 |
+
|
547 |
+
@elementwise_type_promotion_wrapper(
    type_promoting_args=("input", "target"),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT,
)
def smooth_l1_loss(
    input: TensorLikeType,
    target: TensorLikeType,
    size_average: Optional[bool] = None,
    reduce: Optional[bool] = None,
    reduction: str = "mean",
    beta: float = 1.0,
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.smooth_l1_loss
    """
    if size_average is not None or reduce is not None:
        # TODO: Raise exception instead of converting value. This is only for
        # primTorch since it can drop support for deprecated arguments.
        # msg = "size_average and reduce args are deprecated, please use reduction argument."
        reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)
    _check_reduction_value(reduction)

    # With beta == 0 the loss degenerates to plain L1.
    if beta == 0.0:
        return torch.nn.functional.l1_loss(
            input, target, size_average=size_average, reduce=reduce, reduction=reduction
        )

    # Quadratic inside the |diff| < beta band, linear outside.
    abs_diff = torch.abs(input - target)
    loss = torch.where(abs_diff < beta, 0.5 * abs_diff**2 / beta, abs_diff - 0.5 * beta)
    return _apply_loss_reduction(loss, reduction)
|
577 |
+
|
578 |
+
|
579 |
+
# Forwarding alias: the functional variant doesn't support the out kwarg
# CompositeImplicitAutograd - don't register decomp
def log_softmax(
    a: TensorLikeType,
    dim: Optional[int] = None,
    _stacklevel: int = 3,  # for compat when using TorchRefsMode(strict=True)
    dtype: Optional[torch.dtype] = None,
) -> TensorLikeType:
    """Reference alias for torch.nn.functional.log_softmax (requires explicit dim)."""
    # The error is for compat with regular PyTorch, which has this behavior
    # deprecated. For PrimTorch, it's fine to drop support for deprecated
    # behavior because it requires explicit opt in. This error is to inform
    # users how to update their calls.
    torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X")
    return torch.log_softmax(a=a, dim=dim, dtype=dtype)  # type: ignore[call-overload]
|
593 |
+
|
594 |
+
|
595 |
+
@register_decomposition(aten.margin_ranking_loss)
def margin_ranking_loss(
    input1: TensorLikeType,
    input2: TensorLikeType,
    target: TensorLikeType,
    margin: float = 0.0,
    reduction: str = "mean",
) -> TensorLikeType:
    """Reference implementation of torch.nn.functional.margin_ranking_loss.

    Per-element loss: max(0, -target * (input1 - input2) + margin).
    """
    if input1.ndim != input2.ndim or input1.ndim != target.ndim:
        raise RuntimeError(
            "margin_ranking_loss : All input tensors should have same dimension but got sizes: "
            f"input1: {input1.shape}, input2: {input2.shape}, target: {target.shape} "
        )
    _check_reduction_value(reduction)
    pairwise_gap = input1 - input2
    loss = torch.clamp_min(margin - target * pairwise_gap, 0)
    return _apply_loss_reduction(loss, reduction)
|
612 |
+
|
613 |
+
|
614 |
+
@elementwise_type_promotion_wrapper(
    type_promoting_args=("input", "target"),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT,
)
def mse_loss(
    input: TensorLikeType,
    target: TensorLikeType,
    size_average: Optional[bool] = None,
    reduce: Optional[bool] = None,
    reduction: str = "mean",
) -> TensorLikeType:
    """Reference implementation of torch.nn.functional.mse_loss."""
    # Deprecated flags win over `reduction` when supplied.
    if size_average is not None or reduce is not None:
        # TODO: Raise exception instead of converting value. This is only for
        # primTorch since it can drop support for deprecated arguments.
        # msg = "size_average and reduce args are deprecated, please use reduction argument."
        reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)
    _check_reduction_value(reduction)
    squared_error = torch.pow(input - target, 2)
    return _apply_loss_reduction(squared_error, reduction)
|
633 |
+
|
634 |
+
|
635 |
+
@register_decomposition(aten.hinge_embedding_loss)
def hinge_embedding_loss(
    input: TensorLikeType,
    target: TensorLikeType,
    margin: float = 1.0,
    reduction: str = "mean",
) -> TensorLikeType:
    """Reference implementation of torch.nn.functional.hinge_embedding_loss.

    Per-element loss:
        input                    if target == 1
        max(0, margin - input)   if target == -1
    """
    _check_reduction_value(reduction)
    # Contribution of target == 1 entries: the raw input.
    pos_term = torch.where(target != -1, input, 0)
    # Contribution of target == -1 entries: max(0, margin - input).
    neg_term = torch.where(target != 1, torch.clamp_min(margin - input, 0), 0)
    return _apply_loss_reduction(neg_term + pos_term, reduction)
|
650 |
+
|
651 |
+
|
652 |
+
def _nll_loss_nd(
    input: TensorLikeType,
    target: TensorLikeType,
    weight: Optional[TensorLikeType],
    reduction: str,
    ignore_index: int,
) -> TensorLikeType:
    """Core NLL-loss kernel for 1-D, 2-D, and 3-D inputs.

    Gathers -input[..., target, ...] for each target class, scales each
    element by its class weight (or 0 when the target equals ``ignore_index``),
    then applies the requested reduction. For "mean", the result is the
    weighted mean (sum of loss / sum of effective weights).
    """
    torch._check(
        input.ndim > 0 and input.ndim <= 3,
        lambda: f"Expected input dimension to be either [1, 2, 3] but received {input.ndim}.",
    )

    torch._check(
        (input.ndim == 1) or (input.shape[0] == target.shape[0]),
        lambda: f"Expected input batch size {input.shape[0]} to match target batch size {target.shape[0]}.",
    )

    _check_reduction_value(reduction)

    # Flattened target makes the per-element weight lookup and the 3-D gather
    # below uniform across input ranks.
    flat_target = torch.flatten(target)
    ignore_classes_mask = torch.eq(flat_target, ignore_index)

    # TODO: Enable data-dependent checks with debug mode
    # TODO: This check does not work with FakeTensor inputs; See Issue #85834
    # Explicit cast for class_check to bool; See Issue #78071
    # NOTE: the string below is deliberately disabled code, kept for reference.
    """
    from torch._subclasses.fake_tensor import FakeTensor
    num_classes = input.shape[1] if input.ndim > 1 else input.shape[0]
    valid_classes_mask = torch.logical_and(
        (flat_target >= 0), (flat_target < num_classes)
    )
    class_check = torch.all(torch.logical_or(ignore_classes_mask, valid_classes_mask))
    torch._check(
        isinstance(target, FakeTensor) or bool(class_check.item()),
        lambda: "A target class is out-of-bounds and not the ignore index.",
    )
    """

    # Per-element weight: 0 for ignored targets, otherwise the class weight
    # (1 when no weight tensor is supplied).
    ignore_class_weight = torch.scalar_tensor(0, dtype=input.dtype, device=input.device)
    class_weight = (
        torch.scalar_tensor(1, dtype=input.dtype, device=input.device)
        if weight is None
        else weight[flat_target]
    )
    current_weight = torch.where(
        ignore_classes_mask,
        ignore_class_weight,
        class_weight,
    )

    if input.ndim == 1:
        # implicit batch size = 1
        # input (1 batch size, C classes)
        loss = -input[target] * current_weight
    elif input.ndim == 2:
        # input (N batch size, C classes)
        batch_size = input.shape[0]
        loss = -input[torch.arange(batch_size), target] * current_weight
    else:
        # 3D case (N batch size, C classes, K dimensions)
        # input (N batch size, C classes, K)
        # Build flat (batch, k) index pairs so a single advanced-indexing
        # gather selects input[b, target[b, k], k] for every element.
        batch_size = input.shape[0]
        extent = input.shape[2]
        numel = batch_size * extent
        indices = torch.arange(numel)
        bdx = indices // extent
        kdx = indices % extent
        loss = -input[bdx, flat_target, kdx] * current_weight
        loss = torch.reshape(loss, target.shape)

    if reduction == "none":
        return loss
    elif reduction == "sum":
        return torch.sum(loss)
    else:
        # calculate weighted mean of the loss function
        return torch.sum(loss) / torch.sum(current_weight)
|
729 |
+
|
730 |
+
|
731 |
+
@register_decomposition(aten.nll_loss)
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("input",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def nll_loss(
    input: TensorLikeType,
    target: TensorLikeType,
    weight: Optional[TensorLikeType] = None,
    size_average: Optional[bool] = None,
    ignore_index: int = -100,
    reduce: Optional[bool] = None,
    reduction: str = "mean",
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.nll_loss

    Dispatches 1-D/2-D/3-D inputs directly to ``_nll_loss_nd``; higher-rank
    inputs are flattened to the 3-D case (N, C, -1) first and, for
    reduction="none", reshaped back to the original trailing dimensions.
    """
    torch._check(
        input.ndim > 0,
        lambda: f"Expected input tensor to have 1 or more dimensions (got {input.ndim})",
    )

    # TODO: raise exception instead of converting value
    # msg = "size_average and reduce args are deprecated, please use reduction argument."
    # Convert these options for consistency with the eager mode
    if size_average is not None or reduce is not None:
        reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)

    # The expected behavior when the target and input have zero elements:
    #   reduction = 'none' --- tensor([])
    #   reduction = 'sum'  --- tensor(0.)
    #   reduction = 'mean' --- tensor(nan)
    # Mean reduction on empty tensors produces NaN. See the discussion in
    # https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162
    # NOTE(review): for "sum" this returns empty_like(target), not a scalar
    # zero — presumably matching eager for empty targets; confirm upstream.
    if input.numel() == 0 and target.numel() == 0:
        if reduction == "none":
            return torch.zeros_like(target)
        elif reduction == "sum":
            return torch.empty_like(target)
        else:
            return torch.full_like(target, float("nan"))

    # The _nll_loss_nd helper function handles the most common cases.
    # ndim == 1 (Single Example)
    #   => Batch Size: 1, Input: (C), Target: ()
    # ndim == 2 (k = 1)
    #   => Batch Size: N, Input: (N, C), Target: (N)
    # ndim == 3 (k > 1)
    #   => Batch Size: N, Input: (N, C, K), Target: (N, K)
    if input.ndim <= 3:
        return _nll_loss_nd(input, target, weight, reduction, ignore_index)

    # For ndim > 3, we reshape the input and target to 3-D case.
    # Input (N batch-size, C classes, k-dimensions)
    # Target (N batch-size, k-dimensions)
    torch._check(
        input.ndim > 0 and target.ndim > 0 and target.shape[1:] == input.shape[2:],
        lambda: (
            "Expected input and target to both have ndim > 0 and "
            "target.shape[1:] == input.shape[2:], but got "
            f"target.shape {target.shape} and input.shape {input.shape}"
        ),
    )

    batch_size = input.shape[0]
    num_classes = input.shape[1]
    out_size = [batch_size] + list(target.shape[1:])

    input = torch.reshape(input, [batch_size, num_classes, -1])
    target = torch.reshape(target, [batch_size, -1])
    if reduction != "none":
        return _nll_loss_nd(input, target, weight, reduction, ignore_index)
    else:
        result = _nll_loss_nd(input, target, weight, reduction, ignore_index)
        # reshape flattened inner-dim to original k-dimensions
        return torch.reshape(result, out_size)
|
808 |
+
|
809 |
+
|
810 |
+
# TODO: This ref supports int reduction and out kwarg to be compatible with ATen:
# https://github.com/pytorch/pytorch/issues/83931
# TODO: Could be rewritten to support complex:
# https://github.com/pytorch/pytorch/pull/85041
@register_decomposition(aten.huber_loss)
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("input", "target"),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def huber_loss(
    input: TensorLikeType,
    target: TensorLikeType,
    reduction: Union[str, int] = "mean",
    delta: float = 1.0,
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.huber_loss
    """
    # ATen also accepts the legacy integer encoding of the reduction mode.
    if type(reduction) is int:
        reduction = _reduction_int_to_str(reduction)
    _check_reduction_value(reduction)  # type: ignore[arg-type]
    torch._check(
        delta > 0,
        lambda: "huber_loss does not support non-positive values for delta.",
    )
    abs_err = (input - target).abs()
    # Quadratic inside the delta band, linear outside.
    quadratic = 0.5 * abs_err * abs_err
    linear = delta * (abs_err - 0.5 * delta)
    loss = torch.where(abs_err < delta, quadratic, linear)
    return _apply_loss_reduction(loss, reduction)  # type: ignore[arg-type]
|
839 |
+
|
840 |
+
|
841 |
+
# tanhshrink does not use _make_elementwise_unary_reference because it does not support out
@elementwise_unary_scalar_wrapper
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def tanhshrink(a: TensorLikeType) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.tanhshrink

    Computes a - tanh(a) elementwise.
    """
    if not isinstance(a, TensorLike):
        raise RuntimeError(
            "Expected a tensor input for an elementwise unary operation!"
        )
    shrunk = a - torch.tanh(a)
    return shrunk
|
856 |
+
|
857 |
+
|
858 |
+
@register_decomposition(aten.threshold)
@_inplace_wrapper
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def threshold(
    a: TensorLikeType,
    threshold: NumberType,
    value: Union[bool, int, float],
    inplace: bool = False,
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.threshold

    Elements of ``a`` at or below ``threshold`` are replaced with ``value``;
    all others pass through unchanged.
    """
    if inplace:
        raise NotImplementedError

    below_or_at = a <= threshold
    return torch.where(below_or_at, value, a)
|
879 |
+
|
880 |
+
|
881 |
+
# CompositeImplicitAutograd - don't register decomp
# No elementwise type promotion - core op doesn't explicitly type promote
def triplet_margin_loss(
    anchor: TensorLikeType,
    positive: TensorLikeType,
    negative: TensorLikeType,
    margin: float = 1.0,
    p: float = 2,
    eps: float = 1e-6,
    swap: bool = False,
    size_average: Optional[bool] = None,
    reduce: Optional[bool] = None,
    reduction: str = "mean",
) -> TensorLikeType:
    """Reference implementation of torch.nn.functional.triplet_margin_loss."""
    if size_average is not None or reduce is not None:
        # TODO: Raise exception instead of converting value. This is only for
        # primTorch since it can drop support for deprecated arguments.
        # msg = "size_average and reduce args are deprecated, please use reduction argument."
        reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)

    # torch.nn.functional.triplet_margin_with_distance_loss has no ref defined
    # since it's a pure Python implementation. Use this helper instead.
    def _p_norm_distance(x, y):
        return torch.pairwise_distance(x, y, p, eps)

    return _triplet_margin_with_distance_loss(
        anchor=anchor,
        positive=positive,
        negative=negative,
        distance_function=_p_norm_distance,
        margin=margin,
        swap=swap,
        reduction=reduction,
    )
|
912 |
+
|
913 |
+
|
914 |
+
# Pure Python impl - don't register decomp and don't add a ref. Defined as a
# helper here since triplet_margin_loss can be nicely implemented with it.
def _triplet_margin_with_distance_loss(
    anchor: TensorLikeType,
    positive: TensorLikeType,
    negative: TensorLikeType,
    *,
    distance_function: Optional[
        Callable[[TensorLikeType, TensorLikeType], TensorLikeType]
    ] = None,
    margin: float = 1.0,
    swap: bool = False,
    reduction: str = "mean",
) -> TensorLikeType:
    """Triplet margin loss with a pluggable distance function.

    Per-element loss: max(0, margin + d(anchor, positive) - d(anchor, negative)).
    """
    _check_reduction_value(reduction)

    torch._check(
        anchor.ndim == positive.ndim and positive.ndim == negative.ndim,
        lambda: (
            f"The anchor, positive, and negative tensors are expected to have "
            f"the same number of dimensions, but got: anchor {anchor.ndim}D, "
            f"positive {positive.ndim}D, and negative {negative.ndim}D inputs"
        ),
    )

    if distance_function is None:
        distance_function = torch.pairwise_distance

    dist_pos = distance_function(anchor, positive)
    dist_neg = distance_function(anchor, negative)
    # The distance swap is described in the paper "Learning shallow
    # convolutional feature descriptors with triplet losses" by V. Balntas, E.
    # Riba et al. If True, and if the positive example is closer to the
    # negative example than the anchor is, swaps the positive example and the
    # anchor in the loss computation.
    if swap:
        dist_neg = torch.minimum(dist_neg, distance_function(positive, negative))
    hinge = torch.clamp_min(margin + dist_pos - dist_neg, 0)
    return _apply_loss_reduction(hinge, reduction)
|
957 |
+
|
958 |
+
|
959 |
+
@register_decomposition(aten.hardtanh)
@_inplace_wrapper
@out_wrapper()
@elementwise_unary_scalar_wrapper
@elementwise_type_promotion_wrapper(
    # Fix: this was ("a") — a plain string, not a tuple (missing trailing
    # comma). It only worked because iterating the one-character string "a"
    # happens to yield "a"; an explicit one-element tuple states the intent
    # and matches every other wrapper in this file.
    type_promoting_args=("a",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def hardtanh(
    a: TensorLikeType,
    min_val: NumberType = -1,
    max_val: NumberType = 1,
    inplace: bool = False,
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.hardtanh

    Clamps ``a`` elementwise to the closed interval [min_val, max_val].
    Bool inputs are rejected; ``inplace=True`` is not implemented here
    (handled by ``_inplace_wrapper``).
    """
    if inplace:
        raise NotImplementedError
    if utils.is_boolean_dtype(a.dtype):
        raise RuntimeError("Bool inputs not supported for hardtanh")

    # preserve legacy behavior of boundaries not causing type promotion
    if utils.is_integer_dtype(a.dtype):
        min_val = int(min_val)  # type: ignore[arg-type]
        max_val = int(max_val)  # type: ignore[arg-type]
        # uint8 cannot represent negative clamp limits.
        if not (a.dtype != torch.uint8 or (min_val >= 0 and max_val >= 0)):
            raise RuntimeError(
                "Cannot do hardtanh on an unsigned type with negative limits"
            )
    return torch.clamp(a, min_val, max_val)  # type: ignore[arg-type]
|
990 |
+
|
991 |
+
|
992 |
+
@register_decomposition(aten.gelu)
@out_wrapper()
@elementwise_unary_scalar_wrapper
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def gelu(a: TensorLikeType, approximate: str = "none") -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.gelu

    ``approximate="none"`` uses the exact erf formulation;
    ``approximate="tanh"`` uses the tanh approximation.
    """
    if not isinstance(a, TensorLike):
        raise RuntimeError(
            "Expected a tensor input for an elementwise unary operation!"
        )
    M_SQRT2 = 1.41421356237309504880
    M_SQRT1_2 = 0.70710678118654752440
    M_2_SQRTPI = 1.12837916709551257390
    if approximate == "tanh":
        # 0.5 * a * (1 + tanh(sqrt(2/pi) * (a + 0.044715 * a^3)))
        kBeta = M_SQRT2 * M_2_SQRTPI * 0.5
        kKappa = 0.044715
        cubed = a * a * a
        inner = kBeta * (a + kKappa * cubed)
        return 0.5 * a * (1 + torch.tanh(inner))
    elif approximate == "none":
        # a * Phi(a), with Phi expressed via erf.
        return a * 0.5 * (1 + torch.erf(a * M_SQRT1_2))
    else:
        raise RuntimeError("approximate argument must be either none or tanh.")
|
1021 |
+
|
1022 |
+
|
1023 |
+
# CompositeImplicitAutograd - don't register decomp
@elementwise_type_promotion_wrapper(
    type_promoting_args=("input", "target"),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def poisson_nll_loss(
    input: TensorLikeType,
    target: TensorLikeType,
    log_input: bool = True,
    full: bool = False,
    size_average: Optional[bool] = None,
    eps: float = 1e-8,
    reduce: Optional[bool] = None,
    reduction: str = "mean",
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.poisson_nll_loss
    """
    if size_average is not None or reduce is not None:
        # TODO: Raise exception instead of converting value. This is only for
        # primTorch since it can drop support for deprecated arguments.
        # msg = "size_average and reduce args are deprecated, please use reduction argument."
        reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)
    _check_reduction_value(reduction)

    # `log_input` selects whether `input` is already in log space; eps guards
    # the log in the linear-space branch.
    if log_input:
        loss = torch.exp(input) - target * input
    else:
        loss = input - target * torch.log(input + eps)

    if full:
        # Stirling approximation of log(target!), masked to 0 where target <= 1.
        stirling_term = (
            target * torch.log(target) - target + 0.5 * torch.log(2 * torch.pi * target)
        )
        # avoid inplace add
        loss = loss + stirling_term.masked_fill(target <= 1, 0)
    return _apply_loss_reduction(loss, reduction)
|
1059 |
+
|
1060 |
+
|
1061 |
+
@register_decomposition(aten.prelu)
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a", "weight"),
    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def prelu(a: TensorLikeType, weight: TensorLikeType) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.prelu

    Computes where(a > 0, a, a * weight), with ``weight`` either a single
    scalar or a per-channel 1-D tensor broadcast across dim 1 of ``a``.
    """
    torch._check(
        isinstance(a, TensorLike),
        lambda: f"prelu: Expected `a` to be tensor, but got: {type(a)}",
    )
    torch._check(
        isinstance(weight, TensorLike),
        lambda: f"prelu: Expected `weight` to be tensor, but got: {type(weight)}",
    )

    # A non-scalar weight must match the input's channel count (dim 1 for
    # ndim >= 2 inputs; 1-D inputs have an implicit single channel).
    if weight.numel() != 1:
        torch._check(a.ndim > 0, lambda: "Not allow zero-dim input tensor.")
        channel_size = a.shape[1] if a.ndim >= 2 else 1
        torch._check(
            weight.numel() == channel_size,
            lambda: f"Mismatch of parameter numbers and input channel size. Found parameter numbers ="
            f" {weight.numel()} and channel size = {channel_size}.",
        )

    torch._check(
        weight.ndim == 0 or weight.ndim == 1,
        lambda: f"prelu: Expected `weight` to be a scalar or 1D tensor, but got: "
        f"ndim = {weight.ndim}",
    )
    if a.ndim == 0:
        # Scalar input: a 1-D weight degenerates to its single element.
        weight = weight[0] if weight.ndim == 1 else weight
    else:
        # Broadcast weight across the channel axis: dim 1 for ndim >= 2
        # inputs, dim 0 for 1-D inputs; scalar weights broadcast to all dims.
        weight = prims.broadcast_in_dim(
            weight, a.shape, tuple() if weight.ndim == 0 else (0 if a.ndim == 1 else 1,)
        )

    return torch.where(a > 0, a, a * weight)
|
1101 |
+
|
1102 |
+
|
1103 |
+
@register_decomposition(aten.relu6)
@_inplace_wrapper
@out_wrapper()
def relu6(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.relu6

    Equivalent to clamping to [0, 6], implemented via hardtanh.
    """
    if inplace:
        raise NotImplementedError

    # See https://github.com/pytorch/pytorch/pull/81142#discussion_r918220126
    # It may be better to use clamp here, but we use hardtanh to replicate
    # the behavior of the existing implementation
    return torch.nn.functional.hardtanh(a, 0, 6)
|
1117 |
+
|
1118 |
+
|
1119 |
+
@register_decomposition(aten.glu)
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def glu(a: TensorLikeType, dim: int = -1) -> TensorLikeType:
    """Gated linear unit: split ``a`` in half along ``dim`` and gate the
    first half by sigmoid of the second."""
    dim = utils.canonicalize_dims(a.ndim, dim)
    torch._check(
        a.shape[dim] % 2 == 0,
        lambda: f"Halving dimension must be even, but dimension {dim} is size {a.shape[dim]}",
    )
    first_half, second_half = torch.tensor_split(a, 2, dim)
    return first_half * torch.sigmoid(second_half)
|
1134 |
+
|
1135 |
+
|
1136 |
+
@register_decomposition(aten.pairwise_distance)
@out_wrapper()
def pairwise_distance(
    x1: TensorLikeType,
    x2: TensorLikeType,
    p: NumberType = 2.0,
    eps: NumberType = 1e-6,
    keepdim=False,
) -> TensorLikeType:
    """p-norm distance between ``x1`` and ``x2`` along the last dimension."""
    # eps is added to the difference before the norm, matching eager behavior.
    shifted_diff = x1 - x2 + eps
    return torch.linalg.vector_norm(shifted_diff, ord=p, dim=-1, keepdim=keepdim)
|
1146 |
+
|
1147 |
+
|
1148 |
+
@register_decomposition(aten.pdist)
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def pdist(a: TensorLikeType, p: float = 2) -> TensorLikeType:
    """Pairwise p-norm distances between rows of a 2-D tensor, returned as
    the flattened strict upper triangle of the distance matrix."""
    torch._check(a.ndim == 2, lambda: f"pdist only supports 2D tensors, got: {a.ndim}D")
    torch._check(p >= 0, lambda: "pdist only supports non-negative p values")
    # For p == 2 we can use an efficient implementation, but other values of p
    # require creating a much bigger tensor for an intermediate step
    if p == 2:
        # Gram-matrix identity ||x - y||^2 = ||x||^2 + ||y||^2 - 2<x, y>;
        # the clamp guards against tiny negative values from rounding.
        gram = torch.mm(a, a.T)
        sq_norms = torch.diag(gram)
        t = torch.sqrt(torch.clamp(sq_norms + sq_norms.unsqueeze(-1) - 2 * gram, min=0))
    else:
        t = torch.linalg.vector_norm(a.unsqueeze(1) - a, ord=p, dim=2)
    upper = torch.triu_indices(t.shape[0], t.shape[1], offset=1, device=a.device)
    return t.flatten().index_select(0, upper[0] * t.shape[0] + upper[1])
|
1167 |
+
|
1168 |
+
|
1169 |
+
# Needed as aten.{celu_,elu_...} exist (even if they don't have the in-place kwarg)
# Each alias wraps the out-of-place ref so the trailing-underscore (in-place)
# ATen names resolve to a working implementation.
celu_ = _make_inplace(celu)
elu_ = _make_inplace(elu)
mish_ = _make_inplace(mish)
selu_ = _make_inplace(selu)
threshold_ = _make_inplace(threshold)
|
env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/functional/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (24.4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_refs/special/__init__.py
ADDED
@@ -0,0 +1,236 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import math
|
2 |
+
from typing import Optional, Union
|
3 |
+
|
4 |
+
import torch
|
5 |
+
import torch._prims as prims
|
6 |
+
import torch._prims_common as utils
|
7 |
+
import torch._refs as refs
|
8 |
+
|
9 |
+
from torch import Tensor
|
10 |
+
from torch._decomp import register_decomposition
|
11 |
+
from torch._prims_common import (
|
12 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND,
|
13 |
+
Number,
|
14 |
+
NumberType,
|
15 |
+
TensorLike,
|
16 |
+
TensorLikeType,
|
17 |
+
)
|
18 |
+
from torch._prims_common.wrappers import elementwise_type_promotion_wrapper, out_wrapper
|
19 |
+
from torch._refs import (
|
20 |
+
_make_alias,
|
21 |
+
_make_elementwise_binary_reference,
|
22 |
+
_make_elementwise_unary_reference,
|
23 |
+
)
|
24 |
+
|
25 |
+
|
26 |
+
# Public API of torch._refs.special; names here mirror torch.special.
__all__ = [
    "bessel_j0",
    "bessel_j1",
    "entr",
    "erfcx",
    "expit",
    "i0e",
    "i1",
    "i1e",
    "log_ndtr",
    "logit",
    "log_softmax",
    "multigammaln",
    "ndtr",
    "ndtri",
    "softmax",
    "spherical_bessel_j0",
    "xlog1py",
    "zeta",
]
# Shorthand for the ATen op namespace used by the decomposition
# registrations below.
aten = torch._ops.ops.aten
|
47 |
+
|
48 |
+
|
49 |
+
@_make_elementwise_unary_reference(
    ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def bessel_j0(a: TensorLikeType) -> TensorLikeType:
    """Bessel function of the first kind, order 0; forwards to the prim."""
    result = prims.bessel_j0(a)
    return result
|
54 |
+
|
55 |
+
|
56 |
+
@_make_elementwise_unary_reference(
    ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def bessel_j1(a: TensorLikeType) -> TensorLikeType:
    """Bessel function of the first kind, order 1; forwards to the prim."""
    result = prims.bessel_j1(a)
    return result
|
61 |
+
|
62 |
+
|
63 |
+
@register_decomposition(aten.special_entr)
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def entr(a: TensorLikeType) -> TensorLikeType:
    """Elementwise entropy: -a*log(a) for a > 0, 0 at a == 0, -inf for
    a < 0; NaN inputs propagate."""
    finite_branch = torch.where(
        a > 0, -a * torch.log(a), torch.where(a == 0, 0, -torch.inf)
    )
    return torch.where(torch.isnan(a), a, finite_branch)
|
75 |
+
|
76 |
+
|
77 |
+
@register_decomposition(aten.special_erfcx)
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def erfcx(a: TensorLikeType) -> TensorLikeType:
    """Scaled complementary error function exp(a^2) * erfc(a); forwards to the prim."""
    result = prims.erfcx(a)
    return result
|
85 |
+
|
86 |
+
|
87 |
+
# alias for sigmoid
# `expit` is the SciPy-conventional name for the logistic sigmoid.
expit = _make_alias(torch.sigmoid, "expit")
|
89 |
+
|
90 |
+
|
91 |
+
@_make_elementwise_unary_reference(
    ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def i0e(a: TensorLikeType) -> TensorLikeType:
    """Exponentially scaled modified Bessel function I0; forwards to the prim."""
    result = prims.bessel_i0e(a)
    return result
|
96 |
+
|
97 |
+
|
98 |
+
@_make_elementwise_unary_reference(
    ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def i1(a: TensorLikeType) -> TensorLikeType:
    """Modified Bessel function of the first kind, order 1; forwards to the prim."""
    result = prims.bessel_i1(a)
    return result
|
103 |
+
|
104 |
+
|
105 |
+
@_make_elementwise_unary_reference(
    ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def i1e(a: TensorLikeType) -> TensorLikeType:
    """Exponentially scaled modified Bessel function I1; forwards to the prim."""
    result = prims.bessel_i1e(a)
    return result
|
110 |
+
|
111 |
+
|
112 |
+
@register_decomposition(aten.special_log_ndtr)
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def log_ndtr(a: TensorLikeType) -> TensorLikeType:
    """Log of the standard normal CDF, computed stably in both tails."""
    # Note: M_SQRT1_2 is the value of 1 / √2
    M_SQRT1_2 = 0.707106781186547524400844362104849039
    t = a * M_SQRT1_2
    # For a < 1, log(erfcx(-t)/2) - t*t avoids taking log of a tiny erfc value;
    # otherwise log1p(-erfc(t)/2) is accurate.
    lower_tail = torch.log(torch.special.erfcx(-t) / 2) - t * t
    upper_tail = torch.log1p(-torch.erfc(t) / 2)
    return torch.where(a < 1.0, lower_tail, upper_tail)
|
127 |
+
|
128 |
+
|
129 |
+
@register_decomposition(aten.logit)
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("self",),
    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def logit(self: TensorLikeType, eps: Optional[float] = None) -> TensorLikeType:
    """log(x / (1 - x)), optionally clamping x to [eps, 1 - eps] first."""
    # With eps=None the sentinel -1.0 makes the clamp to [-1, 2] a no-op for
    # in-domain values; out-of-domain inputs then yield nan/inf naturally.
    if eps is None:
        eps = -1.0
    clamped = torch.clamp(self, eps, 1 - eps)
    return torch.log(torch.true_divide(clamped, torch.sub(1, clamped)))
|
142 |
+
|
143 |
+
|
144 |
+
@register_decomposition(aten.special_xlog1py)
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a", "b"),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def xlog1py(a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType]):
    """a * log1p(b) with the conventions 0 * log1p(b) == 0 and NaN-in-b
    propagation; at least one argument must be a Tensor."""
    torch._check(
        isinstance(a, TensorLike) or isinstance(b, TensorLike),
        lambda: 'Expected either argument a or b to be a Tensor"',
    )

    # Operations like eq and log do not handle scalar values, so we convert them to scalar_tensors.
    if isinstance(a, TensorLike) and isinstance(b, Number):
        b = refs.scalar_tensor(b, dtype=a.dtype, device=a.device)
    elif isinstance(b, TensorLike) and isinstance(a, Number):
        a = refs.scalar_tensor(a, dtype=b.dtype, device=b.device)

    # mypy: expected "Tensor"
    assert isinstance(a, TensorLike)
    assert isinstance(b, TensorLike)
    product = torch.where(torch.eq(a, 0), 0, torch.mul(a, torch.log1p(b)))
    return torch.where(torch.isnan(b), float("nan"), product)
|
167 |
+
|
168 |
+
|
169 |
+
@register_decomposition(aten.mvlgamma)
|
170 |
+
@out_wrapper()
|
171 |
+
@elementwise_type_promotion_wrapper(
|
172 |
+
type_promoting_args=("a",),
|
173 |
+
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
174 |
+
)
|
175 |
+
def multigammaln(a: TensorLikeType, p: int) -> TensorLikeType:
|
176 |
+
c = 0.25 * p * (p - 1) * math.log(math.pi)
|
177 |
+
b = 0.5 * torch.arange(start=(1 - p), end=1, step=1, dtype=a.dtype, device=a.device)
|
178 |
+
return torch.sum(torch.lgamma(a.unsqueeze(-1) + b), dim=-1) + c
|
179 |
+
|
180 |
+
|
181 |
+
@register_decomposition(aten.special_ndtr)
|
182 |
+
@out_wrapper()
|
183 |
+
@elementwise_type_promotion_wrapper(
|
184 |
+
type_promoting_args=("a",),
|
185 |
+
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
186 |
+
)
|
187 |
+
def ndtr(a: TensorLikeType) -> TensorLikeType:
|
188 |
+
# Note: M_SQRT1_2 is the value of 1 / √2
|
189 |
+
M_SQRT1_2 = 0.707106781186547524400844362104849039
|
190 |
+
a_sqrt_2 = a * M_SQRT1_2
|
191 |
+
return (1 + torch.erf(a_sqrt_2)) * 0.5
|
192 |
+
|
193 |
+
|
194 |
+
@register_decomposition(aten.special_ndtri)
|
195 |
+
@out_wrapper()
|
196 |
+
@elementwise_type_promotion_wrapper(
|
197 |
+
type_promoting_args=("a",),
|
198 |
+
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
199 |
+
)
|
200 |
+
def ndtri(a: TensorLikeType) -> TensorLikeType:
|
201 |
+
return prims.ndtri(a)
|
202 |
+
|
203 |
+
|
204 |
+
# Forwarding alias: the special variant doesn't support the out kwarg
|
205 |
+
# CompositeImplicitAutograd - don't register decomp
|
206 |
+
def log_softmax(
|
207 |
+
a: TensorLikeType,
|
208 |
+
dim: int,
|
209 |
+
dtype: Optional[torch.dtype] = None,
|
210 |
+
) -> TensorLikeType:
|
211 |
+
return torch.log_softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload]
|
212 |
+
|
213 |
+
|
214 |
+
# Forwarding alias: the special variant doesn't support the out kwarg
|
215 |
+
# CompositeImplicitAutograd - don't register decomp
|
216 |
+
def softmax(
|
217 |
+
a: TensorLikeType,
|
218 |
+
dim: int,
|
219 |
+
dtype: Optional[torch.dtype] = None,
|
220 |
+
) -> TensorLikeType:
|
221 |
+
return torch.softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload]
|
222 |
+
|
223 |
+
|
224 |
+
@_make_elementwise_unary_reference(
|
225 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
226 |
+
)
|
227 |
+
def spherical_bessel_j0(a: TensorLikeType) -> TensorLikeType:
|
228 |
+
return prims.spherical_bessel_j0(a)
|
229 |
+
|
230 |
+
|
231 |
+
# TODO: add docstring
|
232 |
+
@_make_elementwise_binary_reference(
|
233 |
+
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
234 |
+
)
|
235 |
+
def zeta(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType:
|
236 |
+
return prims.zeta(a, b)
|
env-llmeval/lib/python3.10/site-packages/torch/_refs/special/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (5.09 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (482 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_tensor.cpython-310.pyc
ADDED
Binary file (44.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_utils.cpython-310.pyc
ADDED
Binary file (5.57 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/functional_tensor.cpython-310.pyc
ADDED
Binary file (16.4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/meta_utils.cpython-310.pyc
ADDED
Binary file (13 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/schema_check_mode.cpython-310.pyc
ADDED
Binary file (6.09 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_subclasses/fake_utils.py
ADDED
@@ -0,0 +1,188 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import functools
|
2 |
+
import warnings
|
3 |
+
from typing import Callable, Union
|
4 |
+
|
5 |
+
import torch
|
6 |
+
import torch.utils._pytree as pytree
|
7 |
+
from torch._ops import OpOverload
|
8 |
+
from torch._subclasses.fake_tensor import (
|
9 |
+
FakeTensorMode,
|
10 |
+
tree_flatten_only,
|
11 |
+
UnsupportedFakeTensorException,
|
12 |
+
)
|
13 |
+
from torch.utils._python_dispatch import TorchDispatchMode
|
14 |
+
|
15 |
+
|
16 |
+
aten = torch._ops.ops.aten
|
17 |
+
|
18 |
+
|
19 |
+
def outputs_alias_inputs(outputs, inputs):
|
20 |
+
input_storages = {
|
21 |
+
inp._typed_storage()._cdata
|
22 |
+
for inp in tree_flatten_only(torch.Tensor, inputs)
|
23 |
+
if torch._C._has_storage(inp)
|
24 |
+
}
|
25 |
+
return any(
|
26 |
+
torch._C._has_storage(out) and out._typed_storage()._cdata in input_storages
|
27 |
+
for out in tree_flatten_only(torch.Tensor, outputs)
|
28 |
+
)
|
29 |
+
|
30 |
+
|
31 |
+
def outputs_are_inputs(outputs, inputs):
|
32 |
+
input_ids = {id(inp) for inp in tree_flatten_only(torch.Tensor, inputs)}
|
33 |
+
return any(id(out) in input_ids for out in tree_flatten_only(torch.Tensor, outputs))
|
34 |
+
|
35 |
+
|
36 |
+
def output_alias_each_other(outputs):
|
37 |
+
storages = set()
|
38 |
+
for out in tree_flatten_only(torch.Tensor, outputs):
|
39 |
+
if not torch._C._has_storage(out):
|
40 |
+
continue
|
41 |
+
stor = out._typed_storage()._cdata
|
42 |
+
if stor in storages:
|
43 |
+
return True
|
44 |
+
storages.add(stor)
|
45 |
+
return False
|
46 |
+
|
47 |
+
|
48 |
+
def is_sdpa_error(func, idx, e):
|
49 |
+
if (
|
50 |
+
(
|
51 |
+
func is aten._scaled_dot_product_flash_attention.default
|
52 |
+
or func is aten._flash_attention_forward.default
|
53 |
+
)
|
54 |
+
and idx in (6, 7)
|
55 |
+
and "Devices" in repr(e)
|
56 |
+
):
|
57 |
+
return True
|
58 |
+
if (
|
59 |
+
(
|
60 |
+
func is aten._scaled_dot_product_efficient_attention.default
|
61 |
+
or func is aten._efficient_attention_forward.default
|
62 |
+
)
|
63 |
+
and idx in (2, 3)
|
64 |
+
and "Devices" in repr(e)
|
65 |
+
):
|
66 |
+
return True
|
67 |
+
return False
|
68 |
+
|
69 |
+
|
70 |
+
class CrossRefFakeMode(TorchDispatchMode):
|
71 |
+
def __init__(
|
72 |
+
self,
|
73 |
+
ignore_op_fn: Union[Callable[[OpOverload], bool], None] = None,
|
74 |
+
*,
|
75 |
+
check_strides=True,
|
76 |
+
check_aliasing=True,
|
77 |
+
):
|
78 |
+
self.ignore_op_fn = (
|
79 |
+
ignore_op_fn if ignore_op_fn is not None else lambda fn: False
|
80 |
+
)
|
81 |
+
self.check_strides = check_strides
|
82 |
+
self.check_aliasing = check_aliasing
|
83 |
+
|
84 |
+
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
|
85 |
+
kwargs = kwargs or {}
|
86 |
+
|
87 |
+
fake_r = None
|
88 |
+
|
89 |
+
# empty_like excluded for now due to sparse complex
|
90 |
+
# aten._to_dense.default this one is getting called with csc
|
91 |
+
if (
|
92 |
+
func
|
93 |
+
not in (
|
94 |
+
aten.lift_fresh.default,
|
95 |
+
aten.lift_fresh_copy.default,
|
96 |
+
aten.set_.source_Storage_storage_offset,
|
97 |
+
)
|
98 |
+
and not self.ignore_op_fn(func)
|
99 |
+
and torch.Tag.dynamic_output_shape not in func.tags
|
100 |
+
and torch.Tag.inplace_view not in func.tags
|
101 |
+
and torch.Tag.data_dependent_output not in func.tags
|
102 |
+
):
|
103 |
+
# Do not import symbolic_shapes at the top of the module as it imports sympy and that's slow
|
104 |
+
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
105 |
+
|
106 |
+
try:
|
107 |
+
# TODO: enable_python_dispatcher() here
|
108 |
+
with FakeTensorMode(shape_env=ShapeEnv()) as fake_mode:
|
109 |
+
fake_args, fake_kwargs = pytree.tree_map_only(
|
110 |
+
torch.Tensor,
|
111 |
+
functools.partial(fake_mode.from_tensor, static_shapes=True),
|
112 |
+
(args, kwargs),
|
113 |
+
)
|
114 |
+
with warnings.catch_warnings():
|
115 |
+
fake_r = func(*fake_args, **fake_kwargs)
|
116 |
+
except UnsupportedFakeTensorException:
|
117 |
+
pass
|
118 |
+
|
119 |
+
context = (
|
120 |
+
f"When comparing the output of {func} on FakeTensor and concrete Tensors, "
|
121 |
+
f"found"
|
122 |
+
)
|
123 |
+
r = func(*args, **kwargs)
|
124 |
+
if fake_r is not None:
|
125 |
+
r_flat = pytree.tree_leaves(r)
|
126 |
+
f_flat = pytree.tree_leaves(fake_r)
|
127 |
+
assert len(f_flat) == len(
|
128 |
+
r_flat
|
129 |
+
), f"{context} mismatch in number of returns {len(f_flat)} != {len(r_flat)}"
|
130 |
+
|
131 |
+
if self.check_aliasing:
|
132 |
+
r_aliasing = outputs_alias_inputs(r, (args, kwargs))
|
133 |
+
f_aliasing = outputs_alias_inputs(fake_r, (fake_args, fake_kwargs))
|
134 |
+
assert (
|
135 |
+
r_aliasing == f_aliasing
|
136 |
+
), f"{context} mismatch in outputs_alias_inputs check {f_aliasing} != {r_aliasing}"
|
137 |
+
|
138 |
+
r_identity_eq = outputs_are_inputs(r, (args, kwargs))
|
139 |
+
f_identity_eq = outputs_are_inputs(fake_r, (fake_args, fake_kwargs))
|
140 |
+
assert (
|
141 |
+
r_identity_eq == f_identity_eq
|
142 |
+
), f"{context} mismatch in outputs_are_inputs check {f_identity_eq} != {r_identity_eq}"
|
143 |
+
|
144 |
+
r_output_alias_each_other = output_alias_each_other(r)
|
145 |
+
f_output_alias_each_other = output_alias_each_other(fake_r)
|
146 |
+
assert r_output_alias_each_other == f_output_alias_each_other, (
|
147 |
+
f"{context} mismatch in outputs_alias_each_other check "
|
148 |
+
f"{f_output_alias_each_other} != {r_output_alias_each_other}"
|
149 |
+
)
|
150 |
+
|
151 |
+
for idx, (r_out, fake_out) in enumerate(
|
152 |
+
zip(pytree.tree_leaves(r), pytree.tree_leaves(fake_r))
|
153 |
+
):
|
154 |
+
r_is_ten = isinstance(r_out, torch.Tensor)
|
155 |
+
assert r_is_ten == isinstance(
|
156 |
+
fake_out, torch.Tensor
|
157 |
+
), f"{context} mismatched number of tensor outputs"
|
158 |
+
if r_is_ten:
|
159 |
+
assert r_out.requires_grad == fake_out.requires_grad, (
|
160 |
+
f"{context} mismatched requires_grad-ness of outputs. "
|
161 |
+
f"This usually means that you have added autograd support "
|
162 |
+
f"for your operator at a dispatch key other than Autograd, "
|
163 |
+
f"which will lead to problems"
|
164 |
+
)
|
165 |
+
if torch._C._has_storage(r_out):
|
166 |
+
r_offset = r_out.storage_offset()
|
167 |
+
f_offset = fake_out.storage_offset()
|
168 |
+
assert (
|
169 |
+
r_offset == f_offset
|
170 |
+
), f"{context} mismatched storage offset"
|
171 |
+
|
172 |
+
try:
|
173 |
+
torch._prims.utils.compare_tensor_meta(
|
174 |
+
r_out,
|
175 |
+
fake_out,
|
176 |
+
check_strides=self.check_strides,
|
177 |
+
allow_rhs_unbacked=True,
|
178 |
+
)
|
179 |
+
except Exception as e:
|
180 |
+
if is_sdpa_error(func, idx, e):
|
181 |
+
continue
|
182 |
+
error_message = (
|
183 |
+
f"{context} mismatched tensor metadata: {e}"
|
184 |
+
if len(r_flat) == 1
|
185 |
+
else f"{context} mismatched tensor metadata for output[{idx}]: {e}"
|
186 |
+
)
|
187 |
+
raise RuntimeError(error_message) from e
|
188 |
+
return r
|
env-llmeval/lib/python3.10/site-packages/torch/_subclasses/functional_tensor.py
ADDED
@@ -0,0 +1,552 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import contextlib
|
2 |
+
from abc import ABC, abstractmethod
|
3 |
+
from typing import Any, Callable, ContextManager, Tuple
|
4 |
+
|
5 |
+
import torch
|
6 |
+
import torch.utils._pytree as pytree
|
7 |
+
from torch._C import _functionalization_reapply_views_tls as _reapply_views
|
8 |
+
from torch.utils._python_dispatch import return_and_correct_aliasing, TorchDispatchMode
|
9 |
+
|
10 |
+
not_implemented_log = torch._logging.getArtifactLogger(__name__, "not_implemented")
|
11 |
+
|
12 |
+
|
13 |
+
class FunctionalTensor(torch.Tensor):
|
14 |
+
"""
|
15 |
+
Functional tensors represent tensors that will remove mutations
|
16 |
+
from a program. If you perform a mutable operation on a functional tensor,
|
17 |
+
it will re-dispatch to the functional variant of that operation.
|
18 |
+
|
19 |
+
Historically, functionalization is implemented in C++ in the dispatcher.
|
20 |
+
This class is a lightweight python shim around the C++ functionalization logic.
|
21 |
+
|
22 |
+
FunctionalTensor is required to be used with a corresponding
|
23 |
+
FunctionalTensormode active, because it relies
|
24 |
+
on using the mode for dispatch (which can properly handle factory functions).
|
25 |
+
"""
|
26 |
+
|
27 |
+
elem: torch.Tensor
|
28 |
+
# Indicates to our torch_dispatch dispatching infra that
|
29 |
+
# this is an "infra" mode with lower dispatching precedence.
|
30 |
+
_mode_key = torch._C._TorchDispatchModeKey.FUNCTIONAL
|
31 |
+
|
32 |
+
# Note: The reason we add these extra keys to our FunctionalTensor subclass
|
33 |
+
# is to mirror the behavior of C++ functionalization (we can choose to change this
|
34 |
+
# later, as long as it doesn't break anything).
|
35 |
+
# FunctionalTensorWrapper copies **all** dispatch keys from the inner tensor
|
36 |
+
# to the wrapper, excluding functorch and python dispatch keys.
|
37 |
+
# Here I'm trying to re-use the keyset the functorch wrapper subclasses copy,
|
38 |
+
# except that they don't include ZeroTensor so I'm manually adding it in.
|
39 |
+
_extra_dispatch_keys = torch._C._additional_keys_to_prop_for_wrapper_tensors.add(
|
40 |
+
torch._C.DispatchKey.ZeroTensor
|
41 |
+
)
|
42 |
+
|
43 |
+
# These are all aten ops that correspond to metadata queries.
|
44 |
+
# We want FunctionalTensor to be able to handle them directly.
|
45 |
+
metadata_fns = [
|
46 |
+
torch.ops.aten.is_contiguous.default, # type: ignore[has-type]
|
47 |
+
torch.ops.aten.is_contiguous.memory_format, # type: ignore[has-type]
|
48 |
+
torch.ops.aten.is_strides_like_format.default, # type: ignore[has-type]
|
49 |
+
torch.ops.aten.is_non_overlapping_and_dense.default, # type: ignore[has-type]
|
50 |
+
torch.ops.aten.size.default, # type: ignore[has-type]
|
51 |
+
torch.ops.aten.sym_size.default, # type: ignore[has-type]
|
52 |
+
torch.ops.aten.stride.default, # type: ignore[has-type]
|
53 |
+
torch.ops.aten.sym_stride.default, # type: ignore[has-type]
|
54 |
+
torch.ops.aten.storage_offset.default, # type: ignore[has-type]
|
55 |
+
torch.ops.aten.sym_storage_offset.default, # type: ignore[has-type]
|
56 |
+
torch.ops.aten.numel.default, # type: ignore[has-type]
|
57 |
+
torch.ops.aten.sym_numel.default, # type: ignore[has-type]
|
58 |
+
torch.ops.aten.dim.default, # type: ignore[has-type]
|
59 |
+
]
|
60 |
+
|
61 |
+
def __new__(cls, elem):
|
62 |
+
assert torch._is_functional_tensor(elem)
|
63 |
+
|
64 |
+
# In general, we'd like our functional tensor subclass to only be in charge of functionalization,
|
65 |
+
# and defer to the inner subclass for all other functionality.
|
66 |
+
# Example: If our inner tensor is a ZeroTensor, we would want to defer running the ZeroTensor fallback
|
67 |
+
# until after we redispatch to our inner ZeroTensor.
|
68 |
+
# However, there are a few keys that we need to mirror between the inner and outer tensors.
|
69 |
+
# Conjugate
|
70 |
+
# Negative
|
71 |
+
# Why? These keys are used to test metadata queries, like `.is_conj()` and `.is_neg()`.
|
72 |
+
# We **need** calls to is_conj() to return the same thing on the outer and inner tensors,
|
73 |
+
# Because user code / framework code that branches like so needs to do the same thing
|
74 |
+
# when it sees the outer FunctionalTensor:
|
75 |
+
# if (x.is_conj()) {
|
76 |
+
# return at::view_as_real(x.resolve_conj());
|
77 |
+
# } else {
|
78 |
+
# return at::view_as_real(x);
|
79 |
+
# }
|
80 |
+
extra_dispatch_keys = (
|
81 |
+
FunctionalTensor._extra_dispatch_keys & torch._C._dispatch_keys(elem)
|
82 |
+
)
|
83 |
+
|
84 |
+
out = torch.Tensor._make_wrapper_subclass( # type: ignore[arg-type, attr-defined]
|
85 |
+
# TODO: right now, _make_wrapper_subclass's dynamic shape interaction is not great.
|
86 |
+
# Calling the overload that has kwargs causes us to go down the first overload path,
|
87 |
+
# which will **always** specialize sizes.
|
88 |
+
# We should probably eventually fix this so that the first overload can just handle dynamic shapes.
|
89 |
+
cls,
|
90 |
+
elem.shape, # sizes
|
91 |
+
elem.stride(), # strides
|
92 |
+
elem.storage_offset(), # storage_offset
|
93 |
+
None, # memory_format
|
94 |
+
elem.dtype, # dtype
|
95 |
+
elem.layout, # layout
|
96 |
+
elem.device, # device
|
97 |
+
False, # pin_memory
|
98 |
+
elem.requires_grad, # requires_grad
|
99 |
+
"sizes", # dispatch_sizes_strides_policy
|
100 |
+
False, # dispatch_device
|
101 |
+
False, # dispatch_layout
|
102 |
+
extra_dispatch_keys, # _extra_dispatch_keys
|
103 |
+
)
|
104 |
+
out.elem = elem
|
105 |
+
return out
|
106 |
+
|
107 |
+
# Need to disable default torch_function. Why?
|
108 |
+
# Default torch_function will always wrap outputs into a subclass if they aren't already a subclass.
|
109 |
+
# We actually.. don't want to do this sometimes, see Note [FunctionalTensorMode inputs are sometimes plain tensors]
|
110 |
+
__torch_function__ = torch._C._disabled_torch_function_impl
|
111 |
+
|
112 |
+
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
|
113 |
+
unrecognized_types = [
|
114 |
+
t
|
115 |
+
for t in types
|
116 |
+
if t not in [torch.Tensor, torch._subclasses.FakeTensor, FunctionalTensor]
|
117 |
+
]
|
118 |
+
if unrecognized_types:
|
119 |
+
not_implemented_log.debug(
|
120 |
+
"FunctionalTensor unrecognized subclass(es): %s", unrecognized_types
|
121 |
+
)
|
122 |
+
return NotImplemented
|
123 |
+
|
124 |
+
if kwargs is None:
|
125 |
+
kwargs = {}
|
126 |
+
|
127 |
+
# FunctionalTensor needs to plumb all metadata requests to the inner tensor.
|
128 |
+
# In theory we don't have to do this - but if we want to service metadata requests here,
|
129 |
+
# we need to carefully make sure all metadata is accurate (including metadata mutations)
|
130 |
+
if func in FunctionalTensor.metadata_fns:
|
131 |
+
|
132 |
+
def unwrap(x):
|
133 |
+
return x.elem
|
134 |
+
|
135 |
+
assert len(args) == 1 and isinstance(args[0], FunctionalTensor)
|
136 |
+
assert len(kwargs) == 0
|
137 |
+
# All metadata accesses should be plumbed to the inner tensor, that way we don't have to worry
|
138 |
+
# about the problem of keeping metadata in sync between the wrapper and inner tensor.
|
139 |
+
# This also alleviates us from having to manually handle metadata mutations on the wrapper.
|
140 |
+
return func(args[0].elem)
|
141 |
+
# Originally I tried to implement my subclass without giving it a torch_dispatch, but I gave up:
|
142 |
+
# - _make_wrapper_subclass requires a __torch_dispatch__
|
143 |
+
# - If we want to use _make_subclass(), we have a problem: the subclass will share a TensorImpl with the inner tensor,
|
144 |
+
# which is of type FunctionalTensorWrapper! We explicitly do not want our wrapper to be a FunctionalTensorWrapper.
|
145 |
+
# - If we use the default tensor.__new__(), we have another problem: it returns inner_tensor.alias(),
|
146 |
+
# which causes every subclass created above autograd to have autograd view metadata
|
147 |
+
# (in addition to also being a FunctionalTensorWrapper).
|
148 |
+
raise RuntimeError(
|
149 |
+
"Attempting to use FunctionalTensor on its own. Instead, please use it with a corresponding FunctionalTensorMode()"
|
150 |
+
)
|
151 |
+
|
152 |
+
def __repr__(self):
|
153 |
+
return f"FunctionalTensor({repr(self.elem)})"
|
154 |
+
|
155 |
+
@staticmethod
|
156 |
+
def to_functional(x):
|
157 |
+
# We will do the wrapping for the user.
|
158 |
+
assert not torch._is_functional_tensor(x)
|
159 |
+
# The only autograd metadata we care about on the FunctionalTensor is:
|
160 |
+
# - requires_grad (so autograd runs)
|
161 |
+
# - is_leaf (so that mutations on graph inputs that are not leaves are allowed by the autograd engine)
|
162 |
+
# this is handled by FunctionalTensor.to_functional
|
163 |
+
x_functional = torch._to_functional_tensor(x)
|
164 |
+
# Technically the FunctionalTensormode here is unnecessary,
|
165 |
+
# but it avoids spurious NotImplemented logs during `ProxyTorchDispatchMode` tracing.
|
166 |
+
# _mirror_autograd_meta_to queries tensor sizes,
|
167 |
+
# and otherwise the sym_size() call will go to the proxy mode before hitting
|
168 |
+
# FunctionalTensor.__torch_dispatch__
|
169 |
+
with FunctionalTensorMode():
|
170 |
+
torch._mirror_autograd_meta_to(x, x_functional) # type: ignore[attr-defined]
|
171 |
+
out = FunctionalTensor(x_functional)
|
172 |
+
torch._mirror_autograd_meta_to(x_functional, out) # type: ignore[attr-defined]
|
173 |
+
return out
|
174 |
+
|
175 |
+
def from_functional(self):
|
176 |
+
torch._sync(self)
|
177 |
+
return torch._from_functional_tensor(self.elem)
|
178 |
+
|
179 |
+
def replace_(self, output) -> None:
|
180 |
+
torch._functionalize_replace(self.elem, output)
|
181 |
+
|
182 |
+
def commit_update(self) -> None:
|
183 |
+
torch._functionalize_commit_update(self.elem)
|
184 |
+
|
185 |
+
def sync(self) -> None:
|
186 |
+
torch._functionalize_sync(self.elem)
|
187 |
+
|
188 |
+
def mark_mutation_hidden_from_autograd(self) -> None:
|
189 |
+
torch._functionalize_mark_mutation_hidden_from_autograd(self.elem)
|
190 |
+
|
191 |
+
|
192 |
+
class FunctionalTensorMode(TorchDispatchMode):
|
193 |
+
def __init__(self):
|
194 |
+
self.is_on_stack = False
|
195 |
+
self.enter_stack = []
|
196 |
+
# Indicates to our torch_dispatch dispatching infra that
|
197 |
+
# this is an "infra" mode with lower dispatching precedence.
|
198 |
+
self._mode_key = torch._C._TorchDispatchModeKey.FUNCTIONAL
|
199 |
+
# This will be turned off later for pre-dispatch functionalization
|
200 |
+
self.decompose_composite_implicit_ops = True
|
201 |
+
|
202 |
+
# No-op if FunctionalTensorMode is already in use
|
203 |
+
def __enter__(self):
|
204 |
+
if (
|
205 |
+
torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.FUNCTIONAL)
|
206 |
+
is None
|
207 |
+
):
|
208 |
+
self.enter_stack.append(True)
|
209 |
+
|
210 |
+
return super().__enter__()
|
211 |
+
else:
|
212 |
+
self.enter_stack.append(False)
|
213 |
+
return self
|
214 |
+
|
215 |
+
def __exit__(self, a, b, c):
|
216 |
+
is_on_stack = self.enter_stack.pop()
|
217 |
+
if is_on_stack:
|
218 |
+
super().__exit__(a, b, c)
|
219 |
+
|
220 |
+
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
|
221 |
+
if kwargs is None:
|
222 |
+
kwargs = {}
|
223 |
+
|
224 |
+
unrecognized_types = [
|
225 |
+
t
|
226 |
+
for t in types
|
227 |
+
if not issubclass(t, torch._subclasses.FakeTensor)
|
228 |
+
and t not in [torch.Tensor, FunctionalTensor]
|
229 |
+
]
|
230 |
+
if unrecognized_types:
|
231 |
+
not_implemented_log.debug(
|
232 |
+
"FunctionalTensor unrecognized subclass(es): %s", unrecognized_types
|
233 |
+
)
|
234 |
+
return NotImplemented
|
235 |
+
|
236 |
+
if (
|
237 |
+
func not in FunctionalTensor.metadata_fns
|
238 |
+
and self.decompose_composite_implicit_ops
|
239 |
+
# Not all funcs from __torch_dispatch__ are actual dispatcher ops,
|
240 |
+
# e.g. prim.device
|
241 |
+
and torch._C._dispatch_has_kernel(func.name())
|
242 |
+
):
|
243 |
+
with self:
|
244 |
+
# Decomposes CompositeImplicitAutograd ops
|
245 |
+
r = func.decompose(*args, **kwargs)
|
246 |
+
if r is not NotImplemented:
|
247 |
+
return r
|
248 |
+
|
249 |
+
def assert_is_functional(x):
|
250 |
+
assert torch._is_functional_tensor(x)
|
251 |
+
|
252 |
+
def wrap(x):
|
253 |
+
# Only wrap our outputs in subclasses if the inner functionalization call
|
254 |
+
# also wrapped outputs into FunctionalTensorWrappers.
|
255 |
+
# When can this happen? e.g. `torch.div(2, 2)`
|
256 |
+
assert not isinstance(x, FunctionalTensor)
|
257 |
+
if isinstance(x, torch.Tensor) and torch._is_functional_tensor(x):
|
258 |
+
return FunctionalTensor(x)
|
259 |
+
return x
|
260 |
+
|
261 |
+
any_functional_inputs = False
|
262 |
+
|
263 |
+
def unwrap(x):
|
264 |
+
any_functional_inputs = True
|
265 |
+
return x.elem
|
266 |
+
|
267 |
+
from torch._higher_order_ops.auto_functionalize import (
|
268 |
+
can_auto_functionalize,
|
269 |
+
do_auto_functionalize,
|
270 |
+
)
|
271 |
+
|
272 |
+
if can_auto_functionalize(
|
273 |
+
func
|
274 |
+
) and not torch._C._dispatch_has_kernel_for_dispatch_key(
|
275 |
+
func.name(), torch._C.DispatchKey.Functionalize
|
276 |
+
):
|
277 |
+
return do_auto_functionalize(func, args, kwargs)
|
278 |
+
|
279 |
+
args_unwrapped, kwargs_unwrapped = pytree.tree_map_only(
|
280 |
+
FunctionalTensor, unwrap, (args, kwargs)
|
281 |
+
)
|
282 |
+
|
283 |
+
# Expectation: functionalization should not **already** be enabled above our mode.
|
284 |
+
# Why would that be bad? when we return a FunctionalTensor here, we don't want functionalization
|
285 |
+
# to run above this mode and further wrap that output in **another** C++ FunctionalTensorWrapper.
|
286 |
+
is_included = torch._C._dispatch_tls_is_dispatch_key_included(
|
287 |
+
torch._C.DispatchKey.Functionalize
|
288 |
+
)
|
289 |
+
is_excluded = torch._C._dispatch_tls_is_dispatch_key_excluded(
|
290 |
+
torch._C.DispatchKey.Functionalize
|
291 |
+
)
|
292 |
+
assert is_excluded or not is_included
|
293 |
+
include_to_set = (
|
294 |
+
torch._C._dispatch_tls_local_include_set()
|
295 |
+
| torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
|
296 |
+
)
|
297 |
+
exclude_to_set = (
|
298 |
+
torch._C._dispatch_tls_local_exclude_set().remove(
|
299 |
+
torch._C.DispatchKey.Functionalize
|
300 |
+
)
|
301 |
+
- FunctionalTensor._extra_dispatch_keys
|
302 |
+
)
|
303 |
+
# All we want to do here is re-use the existing C++ functionalization logic.
|
304 |
+
# This requires swizzling our TLS dispatch keys so that the Functionalize key is active.
|
305 |
+
with torch._C._ForceDispatchKeyGuard(include_to_set, exclude_to_set):
|
306 |
+
try:
|
307 |
+
# By default for python functionalization (for AOTAutograd), we reapply views.
|
308 |
+
old_apply_views = torch._functionalize_enable_reapply_views(True) # type: ignore[attr-defined]
|
309 |
+
outs_unwrapped = func(*args_unwrapped, **kwargs_unwrapped)
|
310 |
+
outs_wrapped = pytree.tree_map_only(torch.Tensor, wrap, outs_unwrapped)
|
311 |
+
finally:
|
312 |
+
torch._disable_functionalization()
|
313 |
+
torch._functionalize_enable_reapply_views(old_apply_views) # type: ignore[attr-defined]
|
314 |
+
|
315 |
+
is_included = torch._C._dispatch_tls_is_dispatch_key_included(
|
316 |
+
torch._C.DispatchKey.Functionalize
|
317 |
+
)
|
318 |
+
is_excluded = torch._C._dispatch_tls_is_dispatch_key_excluded(
|
319 |
+
torch._C.DispatchKey.Functionalize
|
320 |
+
)
|
321 |
+
assert is_excluded or not is_included
|
322 |
+
|
323 |
+
if (
|
324 |
+
# If no outputs are our functional subclass, then don't try to fix up aliasing
|
325 |
+
not any(
|
326 |
+
isinstance(x, FunctionalTensor)
|
327 |
+
for x in pytree.tree_leaves(outs_wrapped)
|
328 |
+
)
|
329 |
+
# Since lift_fresh lifts its argument into a functional tensor, we can skip the
|
330 |
+
# aliasing correction step. Otherwise, we would be setting the storage of a
|
331 |
+
# lifted tensor to that of an unlifted tensor.
|
332 |
+
# Ref: https://github.com/pytorch/pytorch/issues/111506
|
333 |
+
or func == torch.ops.aten.lift_fresh.default
|
334 |
+
):
|
335 |
+
return outs_wrapped
|
336 |
+
# Wrapper tensor subclasses do not have correct aliasing info! Use this util to manually correct the output aliasing.
|
337 |
+
# inplace ops like `aten.add_()` are expected to return inputs **directly**, instead of creating fresh tensor objects.
|
338 |
+
# Use this util to figure out the right thing to return.
|
339 |
+
# If none of our inputs were wrapped, then we have no FunctionalTensor outputs that we need to fix up storages for.
|
340 |
+
return return_and_correct_aliasing(func, args, kwargs, outs_wrapped)
|
341 |
+
|
342 |
+
|
343 |
+
@contextlib.contextmanager
|
344 |
+
def maybe_disable_functional_mode():
|
345 |
+
maybe_func_mode = torch._C._unset_dispatch_mode(
|
346 |
+
torch._C._TorchDispatchModeKey.FUNCTIONAL
|
347 |
+
)
|
348 |
+
try:
|
349 |
+
yield
|
350 |
+
finally:
|
351 |
+
if maybe_func_mode is not None:
|
352 |
+
torch._C._set_dispatch_mode(maybe_func_mode)
|
353 |
+
|
354 |
+
|
355 |
+
# TODO: clean up the redundancy here,
|
356 |
+
# unify on a single context manager for all mode keys.
|
357 |
+
@contextlib.contextmanager
|
358 |
+
def unset_functional_temporarily():
|
359 |
+
old = torch._C._unset_dispatch_mode(torch._C._TorchDispatchModeKey.FUNCTIONAL)
|
360 |
+
try:
|
361 |
+
yield old
|
362 |
+
finally:
|
363 |
+
if old is not None:
|
364 |
+
torch._C._set_dispatch_mode(old)
|
365 |
+
|
366 |
+
|
367 |
+
# This is similar to torch.func.functionalize, but:
|
368 |
+
# - It uses FunctionalTensorMode, and FunctionalTensor (a python subclass).
|
369 |
+
# One important advantage to using this mode is that it will let us
|
370 |
+
# run functionalization underneath __torch_dispatch__,
|
371 |
+
# which we need in AOTAutograd.
|
372 |
+
# - Doing so means that it does not automatically compose with other
|
373 |
+
# functorch transforms, since these transforms always run above __torch_dispatch__.
|
374 |
+
# That's why this util lives here, and not in functorch.
|
375 |
+
def dispatch_functionalize(func):
|
376 |
+
# TODO: pull these from aot autograd
|
377 |
+
def to_fun(t):
|
378 |
+
if isinstance(t, torch.Tensor):
|
379 |
+
return FunctionalTensor.to_functional(t)
|
380 |
+
return t
|
381 |
+
|
382 |
+
def from_fun(t):
|
383 |
+
if not isinstance(t, FunctionalTensor):
|
384 |
+
# quick sanity assert
|
385 |
+
if isinstance(t, torch.Tensor):
|
386 |
+
assert not torch._is_functional_tensor(t)
|
387 |
+
return t
|
388 |
+
torch._sync(t)
|
389 |
+
return torch._from_functional_tensor(t.elem)
|
390 |
+
|
391 |
+
def inner(*args, **kwargs):
|
392 |
+
func_args = pytree.tree_map_only(torch.Tensor, to_fun, args)
|
393 |
+
func_kwargs = pytree.tree_map_only(torch.Tensor, to_fun, kwargs)
|
394 |
+
|
395 |
+
flattened_wrapped_args = pytree.arg_tree_leaves(*func_args)
|
396 |
+
flattened_wrapped_kwargs = pytree.arg_tree_leaves(**func_kwargs)
|
397 |
+
|
398 |
+
disable_above = torch._C._ExcludeDispatchKeyGuard(
|
399 |
+
torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
|
400 |
+
)
|
401 |
+
with disable_above, FunctionalTensorMode():
|
402 |
+
func_outputs = func(*func_args, **func_kwargs)
|
403 |
+
outputs = pytree.tree_map_only(FunctionalTensor, from_fun, func_outputs)
|
404 |
+
|
405 |
+
return outputs
|
406 |
+
|
407 |
+
return inner
|
408 |
+
|
409 |
+
|
410 |
+
class BaseFunctionalizeAPI(ABC):
|
411 |
+
@abstractmethod
|
412 |
+
def wrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]:
|
413 |
+
pass
|
414 |
+
|
415 |
+
@abstractmethod
|
416 |
+
def unwrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]:
|
417 |
+
pass
|
418 |
+
|
419 |
+
@abstractmethod
|
420 |
+
def functionalize(self, inner_f: Callable) -> Callable:
|
421 |
+
pass
|
422 |
+
|
423 |
+
@abstractmethod
|
424 |
+
def redispatch_to_next(self) -> ContextManager:
|
425 |
+
pass
|
426 |
+
|
427 |
+
@abstractmethod
|
428 |
+
def replace(self, input_tensor, output_tensor) -> None:
|
429 |
+
pass
|
430 |
+
|
431 |
+
@abstractmethod
|
432 |
+
def commit_update(self, tensor) -> None:
|
433 |
+
pass
|
434 |
+
|
435 |
+
@abstractmethod
|
436 |
+
def sync(self, tensor) -> None:
|
437 |
+
pass
|
438 |
+
|
439 |
+
@abstractmethod
|
440 |
+
def mark_mutation_hidden_from_autograd(self, tensor) -> None:
|
441 |
+
pass
|
442 |
+
|
443 |
+
|
444 |
+
class PythonFunctionalizeAPI(BaseFunctionalizeAPI):
|
445 |
+
def wrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]:
|
446 |
+
return torch.utils._pytree.tree_map_only(
|
447 |
+
FunctionalTensor, FunctionalTensor.to_functional, args
|
448 |
+
)
|
449 |
+
|
450 |
+
def unwrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]:
|
451 |
+
return torch.utils._pytree.tree_map_only(
|
452 |
+
FunctionalTensor, FunctionalTensor.from_functional, args
|
453 |
+
)
|
454 |
+
|
455 |
+
def functionalize(self, inner_f: Callable) -> Callable:
|
456 |
+
return dispatch_functionalize(inner_f)
|
457 |
+
|
458 |
+
def redispatch_to_next(self) -> ContextManager:
|
459 |
+
return unset_functional_temporarily()
|
460 |
+
|
461 |
+
def replace(self, input_tensor, output_tensor) -> None:
|
462 |
+
assert isinstance(input_tensor, FunctionalTensor)
|
463 |
+
assert not isinstance(output_tensor, FunctionalTensor)
|
464 |
+
input_tensor.replace_(output_tensor)
|
465 |
+
|
466 |
+
def commit_update(self, tensor) -> None:
|
467 |
+
assert isinstance(tensor, FunctionalTensor)
|
468 |
+
tensor.commit_update()
|
469 |
+
|
470 |
+
def sync(self, tensor) -> None:
|
471 |
+
assert isinstance(tensor, FunctionalTensor)
|
472 |
+
tensor.sync()
|
473 |
+
|
474 |
+
def mark_mutation_hidden_from_autograd(self, tensor) -> None:
|
475 |
+
assert isinstance(tensor, FunctionalTensor)
|
476 |
+
tensor.mark_mutation_hidden_from_autograd()
|
477 |
+
|
478 |
+
|
479 |
+
class CppFunctionalizeAPI(BaseFunctionalizeAPI):
|
480 |
+
def wrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]:
|
481 |
+
from torch._functorch.eager_transforms import _wrap_all_tensors_to_functional
|
482 |
+
|
483 |
+
return _wrap_all_tensors_to_functional(args, level=0)
|
484 |
+
|
485 |
+
def unwrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]:
|
486 |
+
from torch._functorch.eager_transforms import (
|
487 |
+
_unwrap_all_tensors_from_functional,
|
488 |
+
)
|
489 |
+
|
490 |
+
return _unwrap_all_tensors_from_functional(args, reapply_views=_reapply_views())
|
491 |
+
|
492 |
+
def functionalize(self, inner_f: Callable) -> Callable:
|
493 |
+
return torch.func.functionalize(inner_f)
|
494 |
+
|
495 |
+
def redispatch_to_next(self) -> ContextManager:
|
496 |
+
return torch._C._ExcludeDispatchKeyGuard(
|
497 |
+
torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
|
498 |
+
)
|
499 |
+
|
500 |
+
def replace(self, input_tensor, output_tensor) -> None:
|
501 |
+
torch._functionalize_replace(input_tensor, output_tensor)
|
502 |
+
|
503 |
+
def commit_update(self, tensor) -> None:
|
504 |
+
torch._functionalize_commit_update(tensor)
|
505 |
+
|
506 |
+
def sync(self, tensor) -> None:
|
507 |
+
torch._functionalize_sync(tensor)
|
508 |
+
|
509 |
+
def mark_mutation_hidden_from_autograd(self, tensor) -> None:
|
510 |
+
torch._functionalize_mark_mutation_hidden_from_autograd(tensor)
|
511 |
+
|
512 |
+
|
513 |
+
class FunctorchFunctionalizeAPI(BaseFunctionalizeAPI):
|
514 |
+
def __init__(self, interpreter):
|
515 |
+
self.interpreter = interpreter
|
516 |
+
|
517 |
+
def wrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]:
|
518 |
+
from torch._functorch.eager_transforms import _wrap_all_tensors_to_functional
|
519 |
+
|
520 |
+
return _wrap_all_tensors_to_functional(args, level=self.interpreter.level())
|
521 |
+
|
522 |
+
def unwrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]:
|
523 |
+
from torch._functorch.eager_transforms import (
|
524 |
+
_unwrap_all_tensors_from_functional,
|
525 |
+
)
|
526 |
+
|
527 |
+
return _unwrap_all_tensors_from_functional(
|
528 |
+
args, reapply_views=self.interpreter.functionalize_add_back_views()
|
529 |
+
)
|
530 |
+
|
531 |
+
def functionalize(self, inner_f: Callable) -> Callable:
|
532 |
+
return torch.func.functionalize(
|
533 |
+
inner_f,
|
534 |
+
remove="mutations_and_views"
|
535 |
+
if self.interpreter.functionalize_add_back_views()
|
536 |
+
else "mutations",
|
537 |
+
)
|
538 |
+
|
539 |
+
def redispatch_to_next(self) -> ContextManager:
|
540 |
+
return self.interpreter.lower()
|
541 |
+
|
542 |
+
def replace(self, input_tensor, output_tensor) -> None:
|
543 |
+
torch._functionalize_replace(input_tensor, output_tensor)
|
544 |
+
|
545 |
+
def commit_update(self, tensor) -> None:
|
546 |
+
torch._functionalize_commit_update(tensor)
|
547 |
+
|
548 |
+
def sync(self, tensor) -> None:
|
549 |
+
torch._functionalize_sync(tensor)
|
550 |
+
|
551 |
+
def mark_mutation_hidden_from_autograd(self, tensor) -> None:
|
552 |
+
torch._functionalize_mark_mutation_hidden_from_autograd(tensor)
|
env-llmeval/lib/python3.10/site-packages/torch/_subclasses/meta_utils.py
ADDED
@@ -0,0 +1,730 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import contextlib
|
2 |
+
import warnings
|
3 |
+
import weakref
|
4 |
+
from typing import ContextManager, List, Optional, Tuple, TYPE_CHECKING
|
5 |
+
|
6 |
+
import torch
|
7 |
+
from torch._C._functorch import (
|
8 |
+
_unwrap_functional_tensor,
|
9 |
+
_wrap_functional_tensor,
|
10 |
+
current_level,
|
11 |
+
peek_interpreter_stack,
|
12 |
+
TransformType,
|
13 |
+
)
|
14 |
+
from torch._guards import Source
|
15 |
+
|
16 |
+
from torch.multiprocessing.reductions import StorageWeakRef
|
17 |
+
from torch.utils._python_dispatch import (
|
18 |
+
is_traceable_wrapper_subclass,
|
19 |
+
transform_subclass,
|
20 |
+
)
|
21 |
+
from torch.utils.weak import WeakIdRef
|
22 |
+
|
23 |
+
if TYPE_CHECKING:
|
24 |
+
# Import the following modules during type checking to enable code intelligence features,
|
25 |
+
# Do not import unconditionally, as they import sympy and importing sympy is very slow
|
26 |
+
from torch.fx.experimental.symbolic_shapes import SymbolicContext
|
27 |
+
|
28 |
+
DimList = List
|
29 |
+
|
30 |
+
|
31 |
+
def safe_is_leaf(t):
|
32 |
+
try:
|
33 |
+
return t.is_leaf
|
34 |
+
except RuntimeError:
|
35 |
+
# inference mode can trigger this
|
36 |
+
return False
|
37 |
+
|
38 |
+
|
39 |
+
def safe_grad(t):
|
40 |
+
with warnings.catch_warnings():
|
41 |
+
warnings.filterwarnings("ignore", "The .grad attribute of a Tensor")
|
42 |
+
return t.grad
|
43 |
+
|
44 |
+
|
45 |
+
def assert_eq(a, b):
|
46 |
+
assert a == b, f"{a} != {b}"
|
47 |
+
|
48 |
+
|
49 |
+
def assert_metadata_eq(assert_eq, m1, m2, *, skip_symbolic=False):
|
50 |
+
def go(m1, m2):
|
51 |
+
assert_eq(m1.dtype, m2.dtype)
|
52 |
+
if not skip_symbolic:
|
53 |
+
assert_eq(m1.shape, m2.shape)
|
54 |
+
assert_eq(m1.requires_grad, m2.requires_grad)
|
55 |
+
assert_eq(m1.is_leaf, m2.is_leaf)
|
56 |
+
assert_eq(m1.grad_fn is None, m2.grad_fn is None)
|
57 |
+
assert_eq(m1.is_sparse, m2.is_sparse)
|
58 |
+
assert_eq(m1.is_inference(), m2.is_inference())
|
59 |
+
assert_eq(m1.is_conj(), m2.is_conj())
|
60 |
+
assert_eq(m1.is_neg(), m2.is_neg())
|
61 |
+
assert_eq(safe_grad(m1) is not None, safe_grad(m2) is not None)
|
62 |
+
if safe_grad(m1) is not None:
|
63 |
+
go(safe_grad(m1), safe_grad(m2))
|
64 |
+
if m1.is_sparse:
|
65 |
+
assert_eq(m1.dense_dim(), m2.dense_dim())
|
66 |
+
assert_eq(m1.sparse_dim(), m2.sparse_dim())
|
67 |
+
assert_eq(m1.is_coalesced(), m2.is_coalesced())
|
68 |
+
else:
|
69 |
+
if not skip_symbolic:
|
70 |
+
assert_eq(m1.stride(), m2.stride())
|
71 |
+
assert_eq(m1.storage_offset(), m2.storage_offset())
|
72 |
+
assert_eq(m1._is_view(), m2._is_view())
|
73 |
+
if m1._is_view():
|
74 |
+
go(m1._base, m2._base)
|
75 |
+
# TODO: test if is resizable (no direct query for this atm)
|
76 |
+
# TODO: audit AutogradMeta to see if it matches
|
77 |
+
# TODO: test forward AD
|
78 |
+
|
79 |
+
return go(m1, m2)
|
80 |
+
|
81 |
+
|
82 |
+
# This is a class for converting multiple tensors into meta tensors which
|
83 |
+
# share the same view/storage structure. The operation model is you allocate
|
84 |
+
# one of these, and then call it repeatedly on all the tensors you want to
|
85 |
+
# convert. It's important to use the same object for tensors you want to
|
86 |
+
# share storage because this is how we correlate shared storages to the same
|
87 |
+
# meta storages. This class will hold weak references to cached tenosrs
|
88 |
+
# and tensor storages.
|
89 |
+
class MetaConverter:
|
90 |
+
def __init__(self):
|
91 |
+
self.storage_memo = {}
|
92 |
+
self.tensor_memo: weakref.WeakValueDictionary = weakref.WeakValueDictionary()
|
93 |
+
self.maybe_storages_to_delete = []
|
94 |
+
self.check_expired_frequency = 128
|
95 |
+
self.check_expired_count = 0
|
96 |
+
self.hit = 0
|
97 |
+
self.miss = 0
|
98 |
+
self.del_hook = None
|
99 |
+
self.arg_cnt = 0
|
100 |
+
|
101 |
+
def successful(self):
|
102 |
+
return self.hit > 0 and self.miss == 0
|
103 |
+
|
104 |
+
def check_for_expired_weak_storages(self):
|
105 |
+
new_li = []
|
106 |
+
stor_to_delete = []
|
107 |
+
for obj in self.maybe_storages_to_delete:
|
108 |
+
if not obj.expired():
|
109 |
+
new_li.append(obj)
|
110 |
+
else:
|
111 |
+
stor_to_delete.append(obj)
|
112 |
+
for obj in stor_to_delete:
|
113 |
+
self.storage_memo.pop(obj, None)
|
114 |
+
self.maybe_storages_to_delete = new_li
|
115 |
+
|
116 |
+
# if for some reason we have aquired many storages which have not expired
|
117 |
+
# even though a tensor with their storage has expired (aliasing or otherwise)
|
118 |
+
# check for expired storages less often so as to bound the amount of work we
|
119 |
+
# do checking for expired storages
|
120 |
+
self.check_expired_frequency = max(
|
121 |
+
self.check_expired_frequency, len(self.maybe_storages_to_delete)
|
122 |
+
)
|
123 |
+
|
124 |
+
def get_tensor_memo(self, t):
|
125 |
+
return self.tensor_memo.get(WeakIdRef(t), None)
|
126 |
+
|
127 |
+
def set_tensor_memo(self, t, v):
|
128 |
+
# hold a weak ref to self, otherwise it will be kept alive
|
129 |
+
# by the del_ten closure
|
130 |
+
self_weak_ref = weakref.ref(self)
|
131 |
+
if t.is_sparse or t.is_mkldnn:
|
132 |
+
weak_st = None
|
133 |
+
else:
|
134 |
+
weak_st = StorageWeakRef(t._typed_storage())
|
135 |
+
tensor_ref_key = WeakIdRef(t)
|
136 |
+
|
137 |
+
def del_ten():
|
138 |
+
# tensor outlives the converter
|
139 |
+
self_ref = self_weak_ref()
|
140 |
+
if self_ref is None:
|
141 |
+
return
|
142 |
+
# on shutdown, tensor_ref_key may not be in memo
|
143 |
+
self_ref.tensor_memo.pop(tensor_ref_key, None)
|
144 |
+
if weak_st and weak_st.expired():
|
145 |
+
self_ref.storage_memo.pop(weak_st, None)
|
146 |
+
elif weak_st is not None:
|
147 |
+
# [expired-storages]
|
148 |
+
# NB: even though the tensor has died,
|
149 |
+
# the deallocation of its storage can take longer,
|
150 |
+
# even when the storage has no other uses/views.
|
151 |
+
# In this case, the StorageWeakRef object will be kept alive
|
152 |
+
# longer than it needs to be, however the storage itself
|
153 |
+
# will be deallocated. We retain the possibly dead storages
|
154 |
+
# and periodically check if any of them are expired and
|
155 |
+
# can be freed.
|
156 |
+
self_ref.maybe_storages_to_delete.append(weak_st)
|
157 |
+
|
158 |
+
weakref.finalize(t, del_ten)
|
159 |
+
self.tensor_memo[tensor_ref_key] = v
|
160 |
+
|
161 |
+
# NB: doesn't actually return a storage, because meta storage is
|
162 |
+
# not supported
|
163 |
+
def meta_storage(self, s, callback):
|
164 |
+
# NB: TypedStorage is freshly allocated and cannot be used as hash
|
165 |
+
# key index.
|
166 |
+
|
167 |
+
# Use a Weak Ref to s in order to not leak memory
|
168 |
+
swr = StorageWeakRef(s)
|
169 |
+
if swr not in self.storage_memo:
|
170 |
+
self.storage_memo[swr] = callback(
|
171 |
+
lambda: torch.empty(s.size(), dtype=torch.uint8, device="meta")
|
172 |
+
).untyped_storage()
|
173 |
+
return self.storage_memo[swr]
|
174 |
+
|
175 |
+
# This function assumes that it's possible to do the conversion
|
176 |
+
# NB: name here is used in a conventional way by Dynamo; it corresponds
|
177 |
+
# precisely to the Source.name() of the tensor we're fakeifying and
|
178 |
+
# corresponds to a valid Python expression. When we construct sub-names
|
179 |
+
# as part of this process, we will maintain this invariant! (Even though
|
180 |
+
# other users of this may not need it this property to be upheld.)
|
181 |
+
def meta_tensor(
|
182 |
+
self,
|
183 |
+
t,
|
184 |
+
shape_env=None,
|
185 |
+
callback=lambda t: t(),
|
186 |
+
source: Optional[Source] = None,
|
187 |
+
symbolic_context: Optional["SymbolicContext"] = None,
|
188 |
+
):
|
189 |
+
from torch._subclasses.fake_tensor import FakeTensor
|
190 |
+
|
191 |
+
if source is None:
|
192 |
+
from torch._dynamo.source import ConstantSource
|
193 |
+
|
194 |
+
# TODO: make a dedicated UnknownSource for this?
|
195 |
+
source = ConstantSource(
|
196 |
+
f"__meta_utils_unknown_tensor{len(self.tensor_memo)}"
|
197 |
+
)
|
198 |
+
|
199 |
+
# This indicates you set no_dispatch() before calling into this
|
200 |
+
# function. This is an error: we may be creating fake tensors and
|
201 |
+
# will perform operations on them which need fake tensor mode to
|
202 |
+
# be active. You will segfault if you are in a no_dispatch() block.
|
203 |
+
assert not torch._C._dispatch_tls_local_exclude_set().has(
|
204 |
+
torch._C.DispatchKey.Python
|
205 |
+
)
|
206 |
+
arg_cnt = self.arg_cnt
|
207 |
+
self.arg_cnt += 1
|
208 |
+
|
209 |
+
# When we make as_strided calls, we end up generating a guard
|
210 |
+
# that the new as_strided tensor is in bounds for the old storage
|
211 |
+
# for the base (since as_strided calls can "bust" out of their
|
212 |
+
# bounding box.) This guard is unnecessary: if a user is able
|
213 |
+
# to provide us a tensor with the view base setup this way, we
|
214 |
+
# don't need to produce a guard, because the fact that they
|
215 |
+
# were able to produce the view base means its in bounds.
|
216 |
+
#
|
217 |
+
# Now, ordinarily, this guard would be harmless. However, the
|
218 |
+
# generated guard refers to variables bound on the base variable.
|
219 |
+
# At the moment, Dynamo doesn't actually guard on x._base, because
|
220 |
+
# according to Voz this results in a lot of spurious invalidations,
|
221 |
+
# and also if the user doesn't directly make use of _base, its
|
222 |
+
# pointless anyway (because programs should be parametric over
|
223 |
+
# whether or not the input tensor is a view or not--unless you're
|
224 |
+
# mutating the input, but that's a whole 'nother ballgame). So
|
225 |
+
# for expediency, we suppress these guards so we don't have to
|
226 |
+
# deal with this (yet, anyway.)
|
227 |
+
#
|
228 |
+
# NB: An old version of this code suppressed guards for ALL operations
|
229 |
+
# happening during meta conversion, not just as_strided calls.
|
230 |
+
# This is too aggressive: we do duck sizing and 0/1 simplification
|
231 |
+
# as we allocate variables, and we do need to register guards for
|
232 |
+
# these cases.
|
233 |
+
maybe_suppress = contextlib.nullcontext
|
234 |
+
if shape_env is not None:
|
235 |
+
maybe_suppress = shape_env.suppress_guards
|
236 |
+
|
237 |
+
def sym_sizes_strides_storage_offset(
|
238 |
+
t, src
|
239 |
+
) -> Tuple[Tuple[int, ...], Tuple[int, ...], int]:
|
240 |
+
if shape_env is not None:
|
241 |
+
if isinstance(t, FakeTensor) and t.fake_mode.shape_env is shape_env:
|
242 |
+
# Don't reallocate the sizes; the shape envs are the same,
|
243 |
+
# so reuse the old sizes/strides/etc
|
244 |
+
return (t.size(), t.stride(), t.storage_offset())
|
245 |
+
else:
|
246 |
+
return shape_env.create_symbolic_sizes_strides_storage_offset(
|
247 |
+
t,
|
248 |
+
src,
|
249 |
+
# Assume that the set of dims that are dynamic are the same between
|
250 |
+
# the wrapper tensor and any inner tensors.
|
251 |
+
# We can revisit this if this assumption does not hold
|
252 |
+
# for any important subclasses later.
|
253 |
+
symbolic_context=symbolic_context,
|
254 |
+
)
|
255 |
+
else:
|
256 |
+
assert symbolic_context is None
|
257 |
+
return (t.size(), t.stride(), t.storage_offset())
|
258 |
+
|
259 |
+
# see expired-storages
|
260 |
+
self.check_expired_count += 1
|
261 |
+
if self.check_expired_count >= self.check_expired_frequency:
|
262 |
+
self.check_for_expired_weak_storages()
|
263 |
+
self.check_expired_count = 0
|
264 |
+
|
265 |
+
if self.get_tensor_memo(t) is None:
|
266 |
+
with torch.inference_mode(t.is_inference()):
|
267 |
+
if t.is_sparse:
|
268 |
+
is_leaf = safe_is_leaf(t)
|
269 |
+
r = callback(
|
270 |
+
lambda: torch.ops.aten._sparse_coo_tensor_with_dims(
|
271 |
+
t.sparse_dim(),
|
272 |
+
t.dense_dim(),
|
273 |
+
t.shape,
|
274 |
+
dtype=t.dtype,
|
275 |
+
layout=torch.sparse_coo,
|
276 |
+
device="meta",
|
277 |
+
)
|
278 |
+
)
|
279 |
+
assert safe_is_leaf(r), "the callback you passed in doesn't detach"
|
280 |
+
# Note [is_coalesced is dispatched]
|
281 |
+
# Strangely enough, is_coalesced() is a dispatched operator,
|
282 |
+
# which means that it will get caught by fake tensor mode.
|
283 |
+
# Ordinarily this would error, but there's some logic in
|
284 |
+
# fake tensor ensure this doesn't happen.
|
285 |
+
r._coalesced_(t.is_coalesced())
|
286 |
+
if t.requires_grad:
|
287 |
+
r.requires_grad = True
|
288 |
+
if t.requires_grad and not is_leaf:
|
289 |
+
with torch.enable_grad():
|
290 |
+
r = r.clone()
|
291 |
+
r._coalesced_(t.is_coalesced())
|
292 |
+
elif t.is_mkldnn:
|
293 |
+
is_leaf = safe_is_leaf(t)
|
294 |
+
sizes, strides, _storage_offset = sym_sizes_strides_storage_offset(
|
295 |
+
t, source
|
296 |
+
)
|
297 |
+
r = callback(
|
298 |
+
lambda: torch.empty_strided(
|
299 |
+
sizes, strides, dtype=t.dtype, device="meta"
|
300 |
+
)
|
301 |
+
)
|
302 |
+
assert safe_is_leaf(r), "the callback you passed in doesn't detach"
|
303 |
+
if t.requires_grad:
|
304 |
+
r.requires_grad = True
|
305 |
+
if t.requires_grad and not is_leaf:
|
306 |
+
with torch.enable_grad():
|
307 |
+
r = r.clone()
|
308 |
+
elif t._is_view():
|
309 |
+
# Construct views in two steps: recursively meta-fy their
|
310 |
+
# base, and then create view(s) off that. NB: doing it
|
311 |
+
# directly from storage is WRONG because this won't cause
|
312 |
+
# version counters to get shared.
|
313 |
+
assert t._is_view()
|
314 |
+
|
315 |
+
from torch._dynamo.source import AttrSource
|
316 |
+
from torch.fx.experimental.symbolic_shapes import (
|
317 |
+
DimDynamic,
|
318 |
+
StatelessSymbolicContext,
|
319 |
+
)
|
320 |
+
|
321 |
+
if shape_env and not t.is_nested and not t._base.is_nested:
|
322 |
+
base_symbolic_context = StatelessSymbolicContext(
|
323 |
+
dynamic_sizes=[DimDynamic.STATIC] * t._base.dim(),
|
324 |
+
constraint_sizes=[None] * t._base.dim(),
|
325 |
+
)
|
326 |
+
else:
|
327 |
+
base_symbolic_context = None
|
328 |
+
base = self.meta_tensor(
|
329 |
+
t._base,
|
330 |
+
shape_env,
|
331 |
+
callback,
|
332 |
+
source=AttrSource(source, "_base"),
|
333 |
+
symbolic_context=base_symbolic_context,
|
334 |
+
)
|
335 |
+
|
336 |
+
def is_c_of_r(complex_dtype, real_dtype):
|
337 |
+
return (
|
338 |
+
utils.is_complex_dtype(complex_dtype)
|
339 |
+
and utils.corresponding_real_dtype(complex_dtype)
|
340 |
+
== real_dtype
|
341 |
+
)
|
342 |
+
|
343 |
+
# In some situations, MetaConverter may be called in a
|
344 |
+
# context where autograd is disabled. For the _is_view
|
345 |
+
# assert to pass, we have to setup the autograd view
|
346 |
+
# metadata anyway. Do this by reenabling the
|
347 |
+
# ADInplaceOrView key. This is kind of a hack.
|
348 |
+
old_exclude = torch._C._dispatch_tls_is_dispatch_key_excluded(
|
349 |
+
torch._C.DispatchKey.ADInplaceOrView
|
350 |
+
)
|
351 |
+
torch._C._dispatch_tls_set_dispatch_key_excluded(
|
352 |
+
torch._C.DispatchKey.ADInplaceOrView, False
|
353 |
+
)
|
354 |
+
try:
|
355 |
+
if base.dtype == t.dtype:
|
356 |
+
pass
|
357 |
+
elif is_c_of_r(base.dtype, t.dtype):
|
358 |
+
base = torch.view_as_real(base)
|
359 |
+
elif is_c_of_r(t.dtype, base.dtype):
|
360 |
+
base = torch.view_as_complex(base)
|
361 |
+
else:
|
362 |
+
# This is not guaranteed to succeed. If it fails, it
|
363 |
+
# means there is another dtype-converting view function
|
364 |
+
# that hasn't been handled here
|
365 |
+
base = base.view(t.dtype)
|
366 |
+
|
367 |
+
# This is very tricky. Naively, you might expect this
|
368 |
+
# to hold:
|
369 |
+
#
|
370 |
+
# if t.requires_grad and not safe_is_leaf(t)
|
371 |
+
# assert t._base.requires_grad
|
372 |
+
#
|
373 |
+
# But it's not true! As you can see in the following
|
374 |
+
# program:
|
375 |
+
#
|
376 |
+
# x = torch.zeros(4)
|
377 |
+
# y = x.view(1, 4)
|
378 |
+
# y.requires_grad = True
|
379 |
+
# z = y.view(1, 1, 4)
|
380 |
+
# assert z._base is x
|
381 |
+
#
|
382 |
+
# So we may have to do *two* views out of the base to
|
383 |
+
# recreate this situation.
|
384 |
+
def _view_from_base(base, t):
|
385 |
+
if t.is_nested:
|
386 |
+
# Nested tensors do not support as_strided, and
|
387 |
+
# hence,always have _view_func available.
|
388 |
+
#
|
389 |
+
# The unsafe version of _view_func omits
|
390 |
+
# checking whether the base passed in has the same
|
391 |
+
# metadata as the original base the view_func
|
392 |
+
# was originally executed with. (1) It is OK here,
|
393 |
+
# because we're calling it on the meta-ified base,
|
394 |
+
# so the metadata is guaranteed to be the same.
|
395 |
+
# (2) It is necessary because we don't actually
|
396 |
+
# want to guard on the base's metadata here.
|
397 |
+
return t._view_func_unsafe(base)
|
398 |
+
else:
|
399 |
+
(
|
400 |
+
sizes,
|
401 |
+
strides,
|
402 |
+
storage_offset,
|
403 |
+
) = sym_sizes_strides_storage_offset(t, source)
|
404 |
+
return base.as_strided(sizes, strides, storage_offset)
|
405 |
+
|
406 |
+
if safe_is_leaf(t):
|
407 |
+
# Leaf views that track view metadata are created by
|
408 |
+
# creating a view inside a no_grad block
|
409 |
+
with torch.no_grad(), maybe_suppress():
|
410 |
+
r = _view_from_base(base, t)
|
411 |
+
# As it's a leaf, we can directly assign requires_grad
|
412 |
+
r.requires_grad = t.requires_grad
|
413 |
+
else:
|
414 |
+
if t._base.requires_grad == t.requires_grad:
|
415 |
+
# Easy case, just run the view op
|
416 |
+
with torch.enable_grad(), maybe_suppress():
|
417 |
+
r = _view_from_base(base, t)
|
418 |
+
|
419 |
+
# NB: We don't actaully faithfully replicate
|
420 |
+
# autograd connectivity, but that doesn't matter
|
421 |
+
# today. See following for more info:
|
422 |
+
# https://gist.github.com/soulitzer/e03f015b314c3f5fcf80888c69390913
|
423 |
+
else:
|
424 |
+
# Obscure case. Create a leaf view and give it the
|
425 |
+
# correct requires_grad, then do the final view.
|
426 |
+
# NB: Can't have a non-leaf without requiring grad!
|
427 |
+
assert t.requires_grad
|
428 |
+
with torch.no_grad():
|
429 |
+
mid = base.view(base.shape)
|
430 |
+
mid.requires_grad = t.requires_grad
|
431 |
+
with torch.enable_grad(), maybe_suppress():
|
432 |
+
r = _view_from_base(mid, t)
|
433 |
+
# The CreationMeta influences whether or not inplace
|
434 |
+
# mutation is an error or not. So we need to make
|
435 |
+
# sure we properly propagate this as well.
|
436 |
+
torch._C._autograd._set_creation_meta(
|
437 |
+
r, torch._C._autograd._get_creation_meta(t)
|
438 |
+
)
|
439 |
+
finally:
|
440 |
+
torch._C._dispatch_tls_set_dispatch_key_excluded(
|
441 |
+
torch._C.DispatchKey.ADInplaceOrView, old_exclude
|
442 |
+
)
|
443 |
+
|
444 |
+
else:
|
445 |
+
is_leaf = safe_is_leaf(t)
|
446 |
+
if not t.is_nested:
|
447 |
+
# Nested tensor subclasses have special logic for
|
448 |
+
# creating symbolic size/strides/storage_offset
|
449 |
+
(
|
450 |
+
sizes,
|
451 |
+
strides,
|
452 |
+
storage_offset,
|
453 |
+
) = sym_sizes_strides_storage_offset(t, source)
|
454 |
+
|
455 |
+
def empty_create(inner_t, inner_src):
|
456 |
+
(
|
457 |
+
inner_sizes,
|
458 |
+
inner_strides,
|
459 |
+
inner_storage_offset,
|
460 |
+
) = sym_sizes_strides_storage_offset(inner_t, inner_src)
|
461 |
+
return torch.empty_strided(
|
462 |
+
inner_sizes,
|
463 |
+
inner_strides,
|
464 |
+
dtype=inner_t.dtype,
|
465 |
+
device="meta",
|
466 |
+
)
|
467 |
+
|
468 |
+
# If we have a subclass that desugars into dense tensors,
|
469 |
+
# perform our callback on each inner tensor.
|
470 |
+
if is_traceable_wrapper_subclass(t):
|
471 |
+
# Note: transform_subclass will use __tensor_unflatten__ to generate
|
472 |
+
# a fresh subclass wrapper, which is why sizes/strides are not passed in
|
473 |
+
# to the creation function here.
|
474 |
+
# We assume that if the inner tensors of the subclass are given symbolic sizes,
|
475 |
+
# their sizes will be used to construct the (symbolic) sizes of the wrapper tensor.
|
476 |
+
from torch._dynamo.source import AttrSource
|
477 |
+
|
478 |
+
if t.is_nested:
|
479 |
+
# Avoid circular import
|
480 |
+
from torch._dynamo.source import (
|
481 |
+
TensorProperty,
|
482 |
+
TensorPropertySource,
|
483 |
+
)
|
484 |
+
|
485 |
+
# For nested tensors, manually do transform_subclass
|
486 |
+
# so we can insert some special processing on ctx
|
487 |
+
attrs, ctx = t.__tensor_flatten__()
|
488 |
+
transformed_tensors_dict = {}
|
489 |
+
orig_shape_env = None
|
490 |
+
for attr in attrs:
|
491 |
+
inner_t = getattr(t, attr)
|
492 |
+
if orig_shape_env is None:
|
493 |
+
orig_shape_env = (
|
494 |
+
inner_t.fake_mode.shape_env
|
495 |
+
if isinstance(inner_t, FakeTensor)
|
496 |
+
else None
|
497 |
+
)
|
498 |
+
transformed_tensors_dict[attr] = callback(
|
499 |
+
lambda: empty_create(
|
500 |
+
inner_t, AttrSource(source, attr)
|
501 |
+
)
|
502 |
+
)
|
503 |
+
# We expect JaggedTensor to have a 'ragged_size' in
|
504 |
+
# its context
|
505 |
+
assert isinstance(ctx, dict)
|
506 |
+
assert "ragged_size" in ctx
|
507 |
+
assert isinstance(t._size[1], torch.SymInt)
|
508 |
+
if orig_shape_env is shape_env:
|
509 |
+
# It's already fake and the shape envs line up, reuse the old size
|
510 |
+
# Do not assert singleton_int; it may already
|
511 |
+
# be a variable
|
512 |
+
ctx["ragged_size"] = t._size[1]
|
513 |
+
else:
|
514 |
+
assert t._size[1].node.singleton_int() is not None
|
515 |
+
# Replace the eager ragged size with our freshly
|
516 |
+
# allocated jagged size that has a source
|
517 |
+
ctx["ragged_size"] = shape_env.create_symintnode(
|
518 |
+
shape_env.create_symbol(
|
519 |
+
t._size[1],
|
520 |
+
TensorPropertySource(
|
521 |
+
source, TensorProperty.SIZE, 1
|
522 |
+
),
|
523 |
+
),
|
524 |
+
hint=t._size[1],
|
525 |
+
)
|
526 |
+
r = type(t).__tensor_unflatten__(
|
527 |
+
transformed_tensors_dict, ctx
|
528 |
+
)
|
529 |
+
else:
|
530 |
+
r = transform_subclass(
|
531 |
+
t,
|
532 |
+
lambda attr, inner_t: callback(
|
533 |
+
lambda: empty_create(
|
534 |
+
inner_t,
|
535 |
+
AttrSource(source, attr),
|
536 |
+
)
|
537 |
+
),
|
538 |
+
)
|
539 |
+
else:
|
540 |
+
r = callback(
|
541 |
+
lambda: torch.empty_strided(
|
542 |
+
sizes,
|
543 |
+
strides,
|
544 |
+
dtype=t.dtype,
|
545 |
+
device="meta",
|
546 |
+
)
|
547 |
+
)
|
548 |
+
assert safe_is_leaf(r), "the callback you passed in doesn't detach"
|
549 |
+
if t.requires_grad:
|
550 |
+
r.requires_grad = t.requires_grad
|
551 |
+
if not is_leaf:
|
552 |
+
# Fake up some autograd history.
|
553 |
+
with torch.enable_grad():
|
554 |
+
# preserve_format is the default, but we want to
|
555 |
+
# emphasize how important it is to preserve
|
556 |
+
# format here
|
557 |
+
r = r.clone(memory_format=torch.preserve_format)
|
558 |
+
|
559 |
+
# Graph-Break for wrapped tensors
|
560 |
+
if torch._C._functorch.is_functorch_wrapped_tensor(t):
|
561 |
+
return NotImplemented
|
562 |
+
|
563 |
+
s = t.untyped_storage()
|
564 |
+
swr = StorageWeakRef(s)
|
565 |
+
if swr not in self.storage_memo and (
|
566 |
+
r.is_nested
|
567 |
+
or (
|
568 |
+
r.stride() == strides
|
569 |
+
and r.storage_offset() == storage_offset
|
570 |
+
)
|
571 |
+
):
|
572 |
+
# You're normal and happy, install the fresh storage into the memo
|
573 |
+
self.storage_memo[swr] = r.untyped_storage()
|
574 |
+
else:
|
575 |
+
# You're in crazy town; somehow you gave us a tensor
|
576 |
+
# that wasn't a view, but had nonzero storage offset,
|
577 |
+
# nontrivial strides (such that clone() couldn't
|
578 |
+
# preserve them), or already aliases with another
|
579 |
+
# tensor's storage. The most typical way to end
|
580 |
+
# up here is with set_. So use set_ to bludgeon this
|
581 |
+
# in.
|
582 |
+
r_s = self.meta_storage(s, callback=callback)
|
583 |
+
# NB: In principle, this should always work, but there
|
584 |
+
# is some subtle difference in the autograd metadata
|
585 |
+
# that means we will backprop the set_ call, even if
|
586 |
+
# r is declared as an input to grad.
|
587 |
+
# See https://github.com/pytorch/pytorch/issues/87956
|
588 |
+
# for the reproducer.
|
589 |
+
# NB: The in_kernel_invocation_manager here is necessary
|
590 |
+
# for fake tensor. If we run the set_ call with fake
|
591 |
+
# tensor on, r will improperly report that it is NOT a
|
592 |
+
# meta tensor but a cpu tensor, and then the set_ call
|
593 |
+
# will fail due to device mismatch. no_dispatch() is
|
594 |
+
# not enough, because the fake tensor will still claim
|
595 |
+
# to be a CPU tensor and you'll end up in the CPU
|
596 |
+
# kernel. Arguably this is a hack; a cleaner way to
|
597 |
+
# solve this is to have a FakeStorage concept which
|
598 |
+
# would report it's CPU device--no problem now! But
|
599 |
+
# this is difficult to do because we don't have storage
|
600 |
+
# subclasses. Relevant test is
|
601 |
+
# DynamicShapesFunctionTests::test_add_dynamic_shapes in
|
602 |
+
# test/dynamo/test_dynamic_shapes.py
|
603 |
+
maybe_fake_mgr: ContextManager[None] = contextlib.nullcontext()
|
604 |
+
from torch._subclasses.fake_tensor import (
|
605 |
+
in_kernel_invocation_manager,
|
606 |
+
maybe_get_fake_mode,
|
607 |
+
)
|
608 |
+
|
609 |
+
mb_fake_mode = maybe_get_fake_mode(r)
|
610 |
+
if mb_fake_mode is not None:
|
611 |
+
maybe_fake_mgr = in_kernel_invocation_manager(mb_fake_mode)
|
612 |
+
with maybe_fake_mgr, torch.no_grad():
|
613 |
+
r.set_(r_s, storage_offset, sizes, strides)
|
614 |
+
|
615 |
+
if safe_grad(t) is not None:
|
616 |
+
from torch._dynamo.source import AttrSource
|
617 |
+
|
618 |
+
r.grad = self.meta_tensor(
|
619 |
+
safe_grad(t),
|
620 |
+
shape_env,
|
621 |
+
callback,
|
622 |
+
source=AttrSource(source, "grad"),
|
623 |
+
symbolic_context=symbolic_context,
|
624 |
+
)
|
625 |
+
torch._C._set_conj(r, t.is_conj())
|
626 |
+
torch._C._set_neg(r, t.is_neg())
|
627 |
+
# This can be skipped if necessary for performance reasons
|
628 |
+
assert_metadata_eq(assert_eq, t, r, skip_symbolic=True)
|
629 |
+
self.set_tensor_memo(t, r)
|
630 |
+
|
631 |
+
return self.get_tensor_memo(t)
|
632 |
+
|
633 |
+
def __call__(
|
634 |
+
self,
|
635 |
+
t,
|
636 |
+
shape_env=None,
|
637 |
+
*,
|
638 |
+
callback=lambda t: t(),
|
639 |
+
source=None,
|
640 |
+
symbolic_context=None,
|
641 |
+
):
|
642 |
+
# TODO: zero tensors? We appear to have eliminated them by
|
643 |
+
# excluding complex for now
|
644 |
+
|
645 |
+
if isinstance(t, torch.Tensor) or is_traceable_wrapper_subclass(t):
|
646 |
+
if t.device.type != "xla" and any(
|
647 |
+
[
|
648 |
+
t.is_sparse_csr,
|
649 |
+
t.layout in [torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc],
|
650 |
+
t.is_quantized,
|
651 |
+
t._is_view() and t._base is not None and t._base.is_sparse,
|
652 |
+
torch._is_functional_tensor(t),
|
653 |
+
t.device.type in ("lazy"),
|
654 |
+
# We need a way to test if a tensor is batched but there
|
655 |
+
# is no official APi to do it
|
656 |
+
# torch._C._is_batched(t),
|
657 |
+
]
|
658 |
+
):
|
659 |
+
# TODO: sparse should support meta
|
660 |
+
# NB technically to('meta') does work but our logging
|
661 |
+
# instrumentation will see the meta conversions and the
|
662 |
+
# tests all break so we just exclude this. In any case
|
663 |
+
# the to conversion isn't really right anyhow.
|
664 |
+
|
665 |
+
if torch._is_functional_tensor(t) and t.device.type != "lazy":
|
666 |
+
if t._is_view():
|
667 |
+
raise RuntimeError(
|
668 |
+
"Cannot safely fakify a view because this process drops the view information right now."
|
669 |
+
)
|
670 |
+
|
671 |
+
st = peek_interpreter_stack()
|
672 |
+
assert (
|
673 |
+
st is None or st.key() == TransformType.Functionalize
|
674 |
+
), "Expect st to be either None or have Functionalize transform key."
|
675 |
+
if st is None:
|
676 |
+
# the case of AOTAutograd
|
677 |
+
torch._sync(t)
|
678 |
+
unwrap_t = torch._from_functional_tensor(t)
|
679 |
+
with torch._dispatch.python.suspend_functionalization():
|
680 |
+
fake_t = self.meta_tensor(
|
681 |
+
unwrap_t,
|
682 |
+
shape_env=shape_env,
|
683 |
+
callback=callback,
|
684 |
+
source=source,
|
685 |
+
symbolic_context=symbolic_context,
|
686 |
+
)
|
687 |
+
out = torch._to_functional_tensor(fake_t)
|
688 |
+
torch._mirror_autograd_meta_to(fake_t, out)
|
689 |
+
return out
|
690 |
+
else:
|
691 |
+
# torch.func.functionalize
|
692 |
+
reapply_views = torch._C._functionalization_reapply_views_tls()
|
693 |
+
unwrap_t = _unwrap_functional_tensor(t, reapply_views)
|
694 |
+
pop_st_ctx = (
|
695 |
+
torch._functorch.pyfunctorch.temporarily_pop_interpreter_stack()
|
696 |
+
)
|
697 |
+
with pop_st_ctx:
|
698 |
+
fake_t = self.meta_tensor(
|
699 |
+
unwrap_t,
|
700 |
+
shape_env=shape_env,
|
701 |
+
callback=callback,
|
702 |
+
source=source,
|
703 |
+
symbolic_context=symbolic_context,
|
704 |
+
)
|
705 |
+
return _wrap_functional_tensor(fake_t, current_level())
|
706 |
+
self.miss += 1
|
707 |
+
return NotImplemented
|
708 |
+
else:
|
709 |
+
self.hit += 1
|
710 |
+
r = self.meta_tensor(
|
711 |
+
t,
|
712 |
+
shape_env=shape_env,
|
713 |
+
callback=callback,
|
714 |
+
source=source,
|
715 |
+
symbolic_context=symbolic_context,
|
716 |
+
)
|
717 |
+
if type(t) is torch.nn.Parameter:
|
718 |
+
# NB: Cannot directly use Parameter constructor
|
719 |
+
# because that would force a detach, not desirable
|
720 |
+
r._is_param = True
|
721 |
+
return r
|
722 |
+
elif torch.overrides.is_tensor_like(t):
|
723 |
+
self.miss += 1
|
724 |
+
return NotImplemented
|
725 |
+
else:
|
726 |
+
# non-Tensor types don't count as hit or miss
|
727 |
+
return t
|
728 |
+
|
729 |
+
|
730 |
+
import torch._prims_common as utils
|
env-llmeval/lib/python3.10/site-packages/torch/_subclasses/schema_check_mode.py
ADDED
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from collections import namedtuple
|
2 |
+
from copy import deepcopy
|
3 |
+
from itertools import combinations
|
4 |
+
|
5 |
+
import torch
|
6 |
+
from torch.fx.operator_schemas import normalize_function
|
7 |
+
from torch.testing._internal.jit_utils import clone_inputs
|
8 |
+
from torch.utils import _pytree as pytree
|
9 |
+
from torch.utils._python_dispatch import TorchDispatchMode
|
10 |
+
from torch.utils._pytree import tree_map
|
11 |
+
|
12 |
+
# Named tuples used within SchemaCheckMode to record observed behaviors:
# Mutation  -- an (op name, argument name) pair noting an in-place change
#              of that argument.
# Aliasing  -- an (op name, argument name, output index) triple noting that
#              the argument shares storage with that output.
Mutation = namedtuple("Mutation", ["op_name", "arg_name"])
Aliasing = namedtuple("Aliasing", ["op_name", "arg_name", "output_number"])

# Short aliases for the C++-bound schema-introspection classes in torch._C.
SchemaArgument = torch._C._SchemaArgument
SchemaArgType = torch._C._SchemaArgType
SchemaInfo = torch._C._SchemaInfo
|
20 |
+
|
21 |
+
# This TorchDispatchMode Subclass is used to verify op schemas
|
22 |
+
# This TorchDispatchMode subclass currently:
|
23 |
+
# - Records the called ops
|
24 |
+
# - Checks for mutations on all inputs
|
25 |
+
# - Checks for aliasing on all inputs
|
26 |
+
|
27 |
+
|
28 |
+
class SchemaCheckMode(TorchDispatchMode):
    """TorchDispatchMode that checks each dispatched op against its schema.

    On every ``__torch_dispatch__`` call it records the op name and verifies
    that any observed argument mutation or input/output aliasing is permitted
    by the op's declared schema, raising ``RuntimeError`` on a violation.
    Permitted mutations/aliases are accumulated in ``self.mutated`` and
    ``self.aliasing`` for later inspection in tests.
    """

    def __init__(self):
        # Information recorded for testing purposes. For example:
        #  - incorrect schemas
        #  - overly conservative schemas
        self.ops = []        # names of all ops dispatched under this mode
        self.mutated = []    # Mutation records allowed by the schema
        self.aliasing = []   # Aliasing records allowed by the schema

    def reset_cache(self):
        """Clear all recorded ops, mutations, and aliasing information."""
        self.ops.clear()
        self.mutated.clear()
        self.aliasing.clear()

    def display_ops(self):
        """Print the recorded op names as a comma-separated list."""
        print(*self.ops, sep=",")

    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        """Run ``func`` and validate its mutation/aliasing behavior.

        Raises:
            RuntimeError: if an argument was mutated or aliased an output
                (or two outputs alias) in a way the schema does not declare.
        """

        def bitwise_equal(lhs, rhs):
            # Value-equality check used to detect mutation.
            if lhs.is_quantized:
                # TODO: This is only OK if can't have NaN quantized; idk if
                # this is actually true
                return torch.equal(lhs, rhs)
            else:
                return torch.allclose(lhs, rhs, equal_nan=True)

        def has_mutated(before, after, md):
            # True iff `after` differs from the pre-call clone `before` in
            # size, values, stride, or storage identity (md holds the
            # pre-call (stride, storage cdata) snapshot).
            are_tensors = type(before) == torch.Tensor and type(after) == torch.Tensor
            if (
                are_tensors
                and before.layout != torch.sparse_csr
                and after.layout != torch.sparse_csr
            ):
                return not (
                    before.size() == after.size()
                    and bitwise_equal(before, after)
                    and md[0] == after.stride()
                    and md[1] == after._typed_storage()._cdata
                )
            return False

        def has_aliased(lhs, rhs):
            # True iff the two tensors' storages overlap. Non-inspectable
            # values (e.g. non-tensors) are treated as non-aliasing.
            try:
                return torch._C._overlaps(lhs, rhs)
            except Exception as exception:
                if str(exception).startswith("Cannot inspect value of type "):
                    return False
                else:
                    # Bare re-raise preserves the original traceback
                    # (was `raise exception`).
                    raise

        def standardize_name(name):
            # Schemas name the receiver "self"; normalized kwargs use "input".
            return name if name != "self" else "input"

        def unwrap(e):
            # Unwrap tensor subclasses to their inner `.elem` tensor, if any.
            if isinstance(e, torch.Tensor) and not type(e) == torch.Tensor:
                try:
                    return e.elem
                except AttributeError:
                    return e
            return e

        def parse_metadata(e):
            # Snapshot (stride, storage cdata) for a tensor, or None when the
            # tensor has neither (e.g. sparse CSR) or can't be unwrapped.
            if isinstance(e, torch.Tensor):
                if not type(e) == torch.Tensor:
                    try:
                        current = e.elem
                        return (
                            deepcopy(current.stride()),
                            current._typed_storage()._cdata,
                        )
                    except AttributeError:
                        return None
                # Sparse CSR tensors do not have strides or storage
                elif e.layout != torch.sparse_csr:
                    return (deepcopy(e.stride()), e._typed_storage()._cdata)
            return None

        self.ops.append(func._schema.name)

        # Clone and process arguments and outputs
        pre_arguments = normalize_function(
            func, args, kwargs, normalize_to_only_use_kwargs=True
        ).kwargs

        c_p_args = dict(zip(pre_arguments.keys(), clone_inputs(pre_arguments.values())))
        cloned_arguments = {
            name: tree_map(unwrap, c_p_args.get(name)) for name in c_p_args
        }
        cloned_metadata = {
            name: [
                parse_metadata(a) for a in pytree.tree_leaves(pre_arguments.get(name))
            ]
            for name in pre_arguments
        }

        out = func(*args, **kwargs)
        arguments = {
            name: tree_map(unwrap, pre_arguments.get(name)) for name in pre_arguments
        }
        tuple_out = out if isinstance(out, tuple) else (out,)
        tuple_out = tree_map(unwrap, tuple_out)

        schema_info = SchemaInfo(func._schema)
        schema_info.add_argument_values(pre_arguments)

        # Process arguments with outputs
        for i in range(len(func._schema.arguments)):
            arg = func._schema.arguments[i]
            name = standardize_name(arg.name)
            if arguments.get(name) is not None:
                before = cloned_arguments.get(name)
                md = cloned_metadata.get(name)
                after = arguments.get(name)
                for j in range(len(tuple_out)):
                    # aten::_unsafe_view is intended to have incorrect aliasing notation (hence unsafe)
                    unsafe_ops = ("aten::_unsafe_view", "aten::unsafe_split")
                    if (
                        has_aliased(tuple_out[j], after)
                        and func._schema.name not in unsafe_ops
                    ):
                        if not schema_info.may_contain_alias(
                            SchemaArgument(SchemaArgType.output, j),
                            SchemaArgument(SchemaArgType.input, i),
                        ):
                            raise RuntimeError(
                                f"Argument {name} is not defined to alias output but was aliasing"
                            )
                        else:
                            self.aliasing.append(
                                Aliasing(func._schema.name, name, f"output_{j}")
                            )
                    if after is tuple_out[j] and isinstance(after, torch.Tensor):
                        # Only mutable ops e.g. (add_, add.out) are allowed to directly return inputs.
                        if not schema_info.is_mutable(
                            SchemaArgument(SchemaArgType.input, i)
                        ) and func not in [
                            torch.ops.aten.lift.default,
                            torch.ops.aten.lift_fresh.default,
                        ]:
                            raise RuntimeError(
                                f"""\
Dispatcher operators below autograd are not allowed to directly return inputs.
However, we found that `outputs[{str(j)}] is {name}"""
                            )
                if any(
                    has_mutated(a, b, c)
                    for a, b, c in zip(
                        pytree.tree_leaves(before), pytree.tree_leaves(after), md
                    )
                ):
                    if not schema_info.is_mutable(
                        SchemaArgument(SchemaArgType.input, i)
                    ):
                        raise RuntimeError(
                            f"Argument {name} is not defined as mutable but was mutated"
                        )
                    else:
                        self.mutated.append(Mutation(func._schema.name, name))

        # Aliasing between outputs
        for i, j in combinations(range(len(func._schema.returns)), 2):
            if has_aliased(tuple_out[i], tuple_out[j]):
                if not schema_info.may_contain_alias(
                    SchemaArgument(SchemaArgType.output, i),
                    SchemaArgument(SchemaArgType.output, j),
                ):
                    raise RuntimeError(f"Outputs {i} and {j} alias unexpectedly")

        return out
|
env-llmeval/lib/python3.10/site-packages/torch/backends/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (2.11 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/backends/_coreml/__init__.py
ADDED
File without changes
|
env-llmeval/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (187 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/preprocess.cpython-310.pyc
ADDED
Binary file (3.74 kB). View file
|
|