Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- ckpts/universal/global_step120/zero/14.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step120/zero/21.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/25.input_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/25.input_layernorm.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/aiosignal/__init__.py +36 -0
- venv/lib/python3.10/site-packages/aiosignal/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/__init__.py +10 -0
- venv/lib/python3.10/site-packages/torchgen/api/__init__.py +0 -0
- venv/lib/python3.10/site-packages/torchgen/api/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/api/__pycache__/autograd.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/api/__pycache__/cpp.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/api/__pycache__/dispatcher.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/api/__pycache__/functionalization.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/api/__pycache__/lazy.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/api/__pycache__/meta.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/api/__pycache__/native.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/api/__pycache__/python.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/api/__pycache__/structured.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/api/__pycache__/translate.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/api/__pycache__/ufunc.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/api/__pycache__/unboxing.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/api/autograd.py +853 -0
- venv/lib/python3.10/site-packages/torchgen/api/cpp.py +467 -0
- venv/lib/python3.10/site-packages/torchgen/api/dispatcher.py +118 -0
- venv/lib/python3.10/site-packages/torchgen/api/functionalization.py +199 -0
- venv/lib/python3.10/site-packages/torchgen/api/lazy.py +464 -0
- venv/lib/python3.10/site-packages/torchgen/api/meta.py +12 -0
- venv/lib/python3.10/site-packages/torchgen/api/native.py +153 -0
- venv/lib/python3.10/site-packages/torchgen/api/python.py +1509 -0
- venv/lib/python3.10/site-packages/torchgen/api/structured.py +157 -0
- venv/lib/python3.10/site-packages/torchgen/api/translate.py +430 -0
- venv/lib/python3.10/site-packages/torchgen/api/types/__init__.py +3 -0
- venv/lib/python3.10/site-packages/torchgen/api/types/__pycache__/types.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/api/types/__pycache__/types_base.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/api/types/signatures.py +423 -0
- venv/lib/python3.10/site-packages/torchgen/api/types/types.py +190 -0
- venv/lib/python3.10/site-packages/torchgen/api/types/types_base.py +270 -0
- venv/lib/python3.10/site-packages/torchgen/api/ufunc.py +209 -0
- venv/lib/python3.10/site-packages/torchgen/api/unboxing.py +248 -0
- venv/lib/python3.10/site-packages/torchgen/code_template.py +96 -0
- venv/lib/python3.10/site-packages/torchgen/context.py +128 -0
- venv/lib/python3.10/site-packages/torchgen/dest/__init__.py +19 -0
- venv/lib/python3.10/site-packages/torchgen/dest/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/dest/__pycache__/lazy_ir.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/dest/__pycache__/lazy_ts_lowering.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/dest/__pycache__/native_functions.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/dest/__pycache__/register_dispatch_key.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/dest/__pycache__/ufunc.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/dest/lazy_ir.py +707 -0
- venv/lib/python3.10/site-packages/torchgen/dest/lazy_ts_lowering.py +48 -0
ckpts/universal/global_step120/zero/14.attention.query_key_value.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8aa9d66bdc93495ba69491f982e6a49e0ac7d1cfd4b3123c8e99a849985627c5
|
3 |
+
size 50332843
|
ckpts/universal/global_step120/zero/21.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d2daaf933e67235534974accfa5c3756e31522e2a86fe7758a9bc3a8b9eb2b64
|
3 |
+
size 33555612
|
ckpts/universal/global_step120/zero/25.input_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:9f5285c0e29af6457c82b6b183c772fb084a45a225e5f75d473df08a859a00be
|
3 |
+
size 9372
|
ckpts/universal/global_step120/zero/25.input_layernorm.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:86f5112ae58aec18b2e20475ecd122be4e774c2b5549101bac653a3e903a6abb
|
3 |
+
size 9293
|
venv/lib/python3.10/site-packages/aiosignal/__init__.py
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from frozenlist import FrozenList
|
2 |
+
|
3 |
+
__version__ = "1.3.1"
|
4 |
+
|
5 |
+
__all__ = ("Signal",)
|
6 |
+
|
7 |
+
|
8 |
+
class Signal(FrozenList):
    """Coroutine-based signal implementation.

    To connect a callback to a signal, use any list method.

    Signals are fired using the send() coroutine, which takes named
    arguments.
    """

    __slots__ = ("_owner",)

    def __init__(self, owner):
        super().__init__()
        self._owner = owner

    def __repr__(self):
        # Keep the exact repr format of the original implementation.
        template = "<Signal owner={}, frozen={}, {!r}>"
        return template.format(self._owner, self.frozen, list(self))

    async def send(self, *args, **kwargs):
        """
        Sends data to all registered receivers.
        """
        # A signal may only be fired after it has been frozen: freezing
        # guarantees the receiver list can no longer change mid-send.
        if not self.frozen:
            raise RuntimeError("Cannot send non-frozen signal.")

        for receiver in self:
            await receiver(*args, **kwargs)  # type: ignore
|
venv/lib/python3.10/site-packages/aiosignal/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.34 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/__init__.py
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""torchgen
|
2 |
+
|
3 |
+
This module contains codegeneration utilities for PyTorch. It is used to
|
4 |
+
build PyTorch from source, but may also be used for out-of-tree projects
|
5 |
+
that extend PyTorch.
|
6 |
+
|
7 |
+
Note well that we provide no BC guarantees for torchgen. If you're interested
|
8 |
+
in using torchgen and want the PyTorch team to be aware, please reach out
|
9 |
+
on GitHub.
|
10 |
+
"""
|
venv/lib/python3.10/site-packages/torchgen/api/__init__.py
ADDED
File without changes
|
venv/lib/python3.10/site-packages/torchgen/api/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (180 Bytes). View file
|
|
venv/lib/python3.10/site-packages/torchgen/api/__pycache__/autograd.cpython-310.pyc
ADDED
Binary file (17 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/api/__pycache__/cpp.cpython-310.pyc
ADDED
Binary file (9.17 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/api/__pycache__/dispatcher.cpython-310.pyc
ADDED
Binary file (2.68 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/api/__pycache__/functionalization.cpython-310.pyc
ADDED
Binary file (3.69 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/api/__pycache__/lazy.cpython-310.pyc
ADDED
Binary file (11.3 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/api/__pycache__/meta.cpython-310.pyc
ADDED
Binary file (420 Bytes). View file
|
|
venv/lib/python3.10/site-packages/torchgen/api/__pycache__/native.cpython-310.pyc
ADDED
Binary file (3.19 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/api/__pycache__/python.cpython-310.pyc
ADDED
Binary file (28.3 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/api/__pycache__/structured.cpython-310.pyc
ADDED
Binary file (3.65 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/api/__pycache__/translate.cpython-310.pyc
ADDED
Binary file (7.44 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/api/__pycache__/ufunc.cpython-310.pyc
ADDED
Binary file (4.59 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/api/__pycache__/unboxing.cpython-310.pyc
ADDED
Binary file (4.34 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/api/autograd.py
ADDED
@@ -0,0 +1,853 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import re
|
2 |
+
from dataclasses import dataclass
|
3 |
+
from typing import cast, Dict, List, Match, Optional, Sequence, Set, Tuple
|
4 |
+
|
5 |
+
from torchgen import local
|
6 |
+
|
7 |
+
from torchgen.api import cpp
|
8 |
+
from torchgen.api.types import BaseCType, Binding, NamedCType, tensorListT
|
9 |
+
from torchgen.model import (
|
10 |
+
BaseTy,
|
11 |
+
BaseType,
|
12 |
+
FunctionSchema,
|
13 |
+
ListType,
|
14 |
+
NativeFunction,
|
15 |
+
NativeFunctionsViewGroup,
|
16 |
+
SchemaKind,
|
17 |
+
Type,
|
18 |
+
)
|
19 |
+
from torchgen.utils import IDENT_REGEX
|
20 |
+
|
21 |
+
|
22 |
+
# Represents a saved attribute involved in backward calculation.
# Note that it can be a derived property of an input argument, e.g.:
# we could save `other.scalar_type()` instead of the entire `other` tensor.
@dataclass(frozen=True)
class SavedAttribute:
    """A value captured at forward time for use in a backward formula."""

    # The NamedCType holds the updated name and cpp type of the attribute
    # for the name, Suffix is appended if it's derived property, e.g.: `other_scalar_type`
    nctype: NamedCType

    # The expression to read the derived property at save time, e.g.:
    # `other.scalar_type()`.
    expr: str
|
34 |
+
|
35 |
+
|
36 |
+
# Represents a backward formula that calculates derivatives for one
# or more tensors.
@dataclass(frozen=True)
class Derivative:
    """A single backward derivative formula read from derivatives.yaml."""

    # The formula string (legit C++ expression).
    # Note that expressions against input arguments have been replaced with the
    # corresponding saved attributes.
    # E.g.:
    #  raw formula: `mul_tensor_backward(grad, self, other.scalar_type())`
    #         here: `mul_tensor_backward(grad, self, other_scalar_type)`
    formula: str

    # The formula string before input argument replacement
    original_formula: str

    # Names of the arguments for which this formula calculates derivatives.
    var_names: Tuple[str, ...]

    # Saved inputs that are referenced by the formula.
    saved_inputs: Tuple[SavedAttribute, ...]

    # Saved outputs that are referenced by the formula.
    saved_outputs: Tuple[SavedAttribute, ...]

    # Gradients that are referenced by name in the formula.
    named_gradients: Set[str]
|
62 |
+
|
63 |
+
|
64 |
+
# Represents a forward formula that calculates forward derivatives
# for one tensor.
@dataclass(frozen=True)
class ForwardDerivative:
    """A single forward-mode derivative formula read from derivatives.yaml."""

    # The formula string (legit C++ expression).
    # Note that special keywords such as "linear" or "element_wise" have been
    # replaced by the automatically generated formula.
    formula: str

    # Name of the output arguments for which this formula calculates forward
    # derivatives
    var_names: Tuple[str, ...]

    # Type of the output arguments for which this formula calculates forward
    # derivatives
    var_types: Tuple[Type, ...]

    # Inputs for which the forward derivatives are required for this formula
    required_inputs_fw_grad: Optional[Tuple[str, ...]]

    # Inputs for which the primal is required for this formula
    required_inputs_primal: Optional[Tuple[str, ...]]

    # Flag to specify if this formula requires the original value of self
    # This is only used by inplace operations
    required_original_self_value: bool

    # If this formula is specified in derivatives.yaml or if we are re-using the
    # out of place formula for inplace
    is_reusing_outplace_formula: bool
|
94 |
+
|
95 |
+
|
96 |
+
# Represents differentiability info for a NativeFunction.
@dataclass(frozen=True)
class DifferentiabilityInfo:
    """All autograd-related metadata associated with one NativeFunction."""

    # The base name read from derivatives.yaml.
    name: str

    # The matching native function.
    #
    # There can be multiple NativeFunction having the same base name:
    #  - different overloads with different types of input arguments;
    #  - in-place/out/functional variants of the same function;
    #
    # We first use the schema string (under the 'name' key) in derivatives.yaml
    # to find the NativeFunction having the same schema string.
    # Then we find the in-place/out/functional variants of the matching function.
    # Among these variants, we choose the one having the same name as the
    # derivatives.yaml entry. If there is no exact match, then we choose the
    # in-place variant.
    # TODO: maybe the logic to search for all variants is no longer necessary?
    func: NativeFunction

    # The name of the generated autograd function.
    # It's set only if we will calculate a derivative, i.e.
    # 'args_with_derivatives' is not empty.
    op: Optional[str]

    # The derivatives formulae for this function.
    # Note that the length of this sequence is the number of differentiable inputs
    derivatives: Sequence[Derivative]

    # The forward derivatives formulae for this function.
    # Note that the length of this sequence is the number of differentiable outputs
    forward_derivatives: Sequence[ForwardDerivative]

    # The union of 'saved_inputs' of all 'derivatives'.
    all_saved_inputs: Sequence[SavedAttribute]

    # The union of 'saved_outputs' of all 'derivatives'.
    all_saved_outputs: Sequence[SavedAttribute]

    # All named gradients that are available for use, in the same
    # order as in the grads vector.
    available_named_gradients: Sequence[str]

    # The named gradients that are used in any of the derivatives.
    # Invariant: all(name in available_named_gradients for name in used_named_gradients)
    used_named_gradients: Set[str]

    # The function's input arguments for which it calculates derivatives.
    # It's the union of 'var_names' of all 'derivatives', sorted by the
    # argument order in the function schema.
    args_with_derivatives: Sequence[Binding]

    # Names of arguments whose derivative formula is 'non_differentiable'.
    non_differentiable_arg_names: Sequence[str]

    # Raw data read from derivatives.yaml.
    output_differentiability: Optional[List[bool]]

    # output_differentiability in derivatives.yaml can be a list of
    # conditions that express if the output is differentiable. In this case,
    # the number of conditions must match the number of outputs
    # (NB: we only support one condition right now).
    # output_differentiability gets populated with True for each condition,
    # while output_differentiability_conditions gets populated with the conditions
    output_differentiability_conditions: Optional[List[str]]

    @property
    def has_derivatives(self) -> bool:
        # True when at least one input has a derivative formula.
        return len(self.args_with_derivatives) > 0

    # Generates a new DifferentiabilityInfo using the exact same set of derivative information,
    # but with a new operator name.
    # This is used when generating "copy" variants of view ops,
    # which are able to use the exact same derivative formula as the original view op
    # See Note [Codegen'd {view}_copy Operators]
    def create_view_copy_from_view_derivative(
        self, g: NativeFunctionsViewGroup
    ) -> Optional["DifferentiabilityInfo"]:
        """Return a copy of this info renamed for the {view}_copy variant of the op,
        or None when the group has no view_copy function."""
        if g.view_copy is None:
            return None
        f = g.view_copy

        name_split_by_period = self.name.split(".", maxsplit=2)
        # Append a "_copy" to the base name of the operator (but keep the overload name the same)
        view_copy_name = f"{name_split_by_period[0]}_copy." + ".".join(
            name_split_by_period[1:]
        )
        view_copy_op_name = None if self.op is None else f"{self.op}_copy"

        return DifferentiabilityInfo(
            # Use the "_copy" version of name/func/op
            name=view_copy_name,
            func=f,
            op=view_copy_op_name,
            # But keep all derivative info the same
            derivatives=self.derivatives,
            forward_derivatives=self.forward_derivatives,
            all_saved_inputs=self.all_saved_inputs,
            all_saved_outputs=self.all_saved_outputs,
            available_named_gradients=self.available_named_gradients,
            used_named_gradients=self.used_named_gradients,
            args_with_derivatives=self.args_with_derivatives,
            non_differentiable_arg_names=self.non_differentiable_arg_names,
            output_differentiability=self.output_differentiability,
            output_differentiability_conditions=self.output_differentiability_conditions,
        )
|
203 |
+
|
204 |
+
|
205 |
+
def uses_ident(info: Optional[DifferentiabilityInfo], ident: str) -> bool:
    """Return True if any backward formula in *info* references *ident*
    as a standalone identifier (whole-word match via IDENT_REGEX)."""
    if info is None:
        return False
    pattern = IDENT_REGEX.format(ident)
    return any(
        re.search(pattern, derivative.formula) is not None
        for derivative in info.derivatives
    )
|
213 |
+
|
214 |
+
|
215 |
+
def uses_retain_variables(info: Optional[DifferentiabilityInfo]) -> bool:
    """Whether any backward formula references the `retain_variables` identifier."""
    return uses_ident(info, "retain_variables")
|
217 |
+
|
218 |
+
|
219 |
+
def uses_single_grad(info: Optional[DifferentiabilityInfo]) -> bool:
    """Whether any backward formula references the bare `grad` identifier."""
    return uses_ident(info, "grad")
|
221 |
+
|
222 |
+
|
223 |
+
# Represents a differentiable `Argument`.
# How is it different from the `Argument` type?
# - It's processed Arguments which are differentiable and only used in the
#   context of the autograd codegen;
# - It can represent SelfArgument or regular Argument but not TensorOptionsArgument;
@dataclass(frozen=True)
class DifferentiableInput:
    """An input argument that participates in differentiation."""

    name: str
    type: Type

    # TODO: only to keep it byte-for-byte compatible with the old codegen, should remove.
    cpp_type: str
|
235 |
+
|
236 |
+
|
237 |
+
# Represents a differentiable `Return`.
# How is it different from the `Return` type?
# - The name in `Return` is optional. Here it is always populated using the same
#   `cpp.return_names()` method.
#   TODO: some cpp naming logic (e.g. resolving name conflict) might be irrelevant?
# - It's processed Returns which are differentiable, in compliance with the
#   `output_differentiability` field defined in derivatives.yaml (if specified),
#   and are only used in the context of the autograd codegen;
@dataclass(frozen=True)
class DifferentiableOutput:
    """A return value that participates in differentiation."""

    name: str
    type: Type

    # TODO: only to keep it byte-for-byte compatible with the old codegen, should remove.
    cpp_type: str
|
252 |
+
|
253 |
+
|
254 |
+
@dataclass(frozen=True)
class NativeFunctionWithDifferentiabilityInfo:
    """Pairs a NativeFunction with its (optional) autograd metadata."""

    # The native function this autograd metadata belongs to.
    func: NativeFunction
    # Differentiability info keyed by dispatch key (e.g. "Default"), if any.
    info: Optional[Dict[str, DifferentiabilityInfo]]
    # Forward derivative formulas keyed by dispatch key, if any.
    fw_derivatives: Optional[Dict[str, Sequence[ForwardDerivative]]]
|
259 |
+
|
260 |
+
|
261 |
+
# TODO: Update comment below since it is out of date.
def dispatch_strategy(fn: NativeFunctionWithDifferentiabilityInfo) -> str:
    """How are we going to call the underlying implementation of a
    declaration?  There are two strategies:
        - use_derived: we want to call the implementation on CPUDoubleType
          (or a similar, derived Type instance).  Because these derived
          instances deal in Tensors, not Variables (it's a completely different
          object, so it doesn't dispatch back to VariableType), code on
          this dispatch path needs to wrap/unwrap tensors.  If the
          derived implementation takes and returns tensors, the
          implementation is usually differentiable (although we also use
          the derived dispatch path for non-differentiable functions
          that we still want to dispatch on the derived Type instance;
          e.g., size())
        - use_type: we want to call the implementation on Type, because
          it is implemented concretely, and the functions it invokes will
          get dispatched back to VariableType (which will ensure that they
          are differentiable.)
    """
    # fn is "derived" as long as any of its per-dispatch-key infos
    # has_derivatives.  dispatch_strategy() is used to guard generation of
    # fns in VariableType and ADInplaceOrViewType, which we want as long as
    # a derivative is defined for ANY dispatch key.
    has_any_derivatives = fn.info is not None and any(
        info.has_derivatives for info in fn.info.values()
    )

    if not fn.func.is_abstract and not has_any_derivatives:
        # The function is concrete (we don't have to override it) and we
        # didn't declare it in derivatives.yaml, so assume it is actually
        # implemented out of differentiable functions.  (This assumption
        # might not hold, but then you'll see gradcheck fail.)
        return "use_type"

    # If the function is abstract (not implemented on at::Type), we must
    # call the implementation on the derived type with unpacked tensors.
    #
    # If the function has a derivative specified and is concrete, we could
    # call either implementation.  We prefer calling the derived type's
    # implementation with unpacked tensors because it is more performant in
    # some cases: any internal calls to other ATen functions won't have the
    # history tracked.
    #
    # If the function has a type dispatched argument (i.e. is a factory),
    # we prefer calling the derived type's implementation both because it is
    # more performant and to ensure factory functions return tensors with
    # _version of 0 (probably not strictly necessary, but nice to have to
    # keeps versions simple to understand.
    return "use_derived"
|
309 |
+
|
310 |
+
|
311 |
+
def is_foreach_func(f: NativeFunction) -> bool:
|
312 |
+
return f.func.name.name.base.startswith("_foreach_")
|
313 |
+
|
314 |
+
|
315 |
+
# note(crcrpar): Most foreach functions can reference an out-place `torch` function whose schema kind
# is functional for their backward derivatives (and forward derivatives in the future), i.e.,
# they would find such one in `functional_info_by_signature`. There however are some exceptions:
# Foreach ops whose reference is the in-place (rather than functional) variant.
_foreach_with_inplace_ref = {"_foreach_zero_"}
# Foreach ops that also have a Tensor (scalar-like) overload of the binary op.
_foreach_with_tensor_overload = {
    "_foreach_add.Tensor",
    "_foreach_mul.Tensor",
    "_foreach_div.Tensor",
}
|
324 |
+
|
325 |
+
|
326 |
+
# Checks if `function_schema` is a native, non-foreach function which `f`, a foreach function
# reference to generate derivatives.
def is_reference_for_foreach(
    f: NativeFunction,
    function_schema: FunctionSchema,
) -> bool:
    """Whether `function_schema` is the non-foreach reference op for foreach op `f`."""
    return (
        # Base name must match the foreach name with its "_foreach_" prefix stripped.
        f.func.name.name.base.split("_foreach_")[-1] == function_schema.name.name.base
        and (
            # Only accept an in-place reference for the known exceptions.
            not function_schema.name.name.inplace
            or str(f.func.name) in _foreach_with_inplace_ref
        )
        and all(
            # Each reference arg's type must equal the foreach arg's type, or —
            # when the foreach arg is list-typed — the list's element type.
            ref_arg.type in (arg.type, getattr(arg.type, "elem", None))
            for arg, ref_arg in zip(
                f.func.arguments.flat_non_out,
                function_schema.arguments.flat_non_out,
            )
        )
    )
|
346 |
+
|
347 |
+
|
348 |
+
# TODO(crcrpar): Avoid hard coding "Default" ideally.
|
349 |
+
def gen_foreach_derivativeinfo(
    foreach_function: NativeFunction,
    functional_info_by_signature: Dict[
        FunctionSchema, Dict[str, DifferentiabilityInfo]
    ],
    non_functional_info_by_signature: Dict[
        FunctionSchema, Dict[str, DifferentiabilityInfo]
    ],
    dispatch_key: str = "Default",
) -> Tuple[Optional[DifferentiabilityInfo], bool]:
    """Generate DifferentiabilityInfo for out-place foreach function, return the existing one for in-place.

    The second return value indicates whether the info is generated in this function.
    """
    # The DifferentiabilityInfo of the non-foreach reference function, if any.
    ref_diff_info: Optional[DifferentiabilityInfo] = None

    # First look for the reference among functional (out-of-place) schemas.
    for function_schema, diff_info in functional_info_by_signature.items():
        if not is_reference_for_foreach(foreach_function, function_schema):
            continue
        ref_diff_info = diff_info[dispatch_key]
        if ref_diff_info is not None:
            break
    # note(crcrpar): It seems like `zero`'s info isn't available in functional_info_by_signature
    # while the info of `zero_` is in non_functional_info_by_signature
    if (
        ref_diff_info is None
        and foreach_function.func.kind() == SchemaKind.inplace
        and str(foreach_function.func.name) in _foreach_with_inplace_ref
    ):
        for function_schema, diff_info in non_functional_info_by_signature.items():
            if not is_reference_for_foreach(foreach_function, function_schema):
                continue
            ref_diff_info = diff_info[dispatch_key]
            if ref_diff_info is not None:
                break
    if ref_diff_info is None:
        return None, False

    # non out-place uses the existing Derivative.
    if foreach_function.func.kind() == SchemaKind.inplace:
        return ref_diff_info, False

    # Map argument names of the reference function to those of the foreach
    # function (and keep the foreach Argument objects by name).
    # NOTE: `function_schema` here is the loop variable left over from the
    # matching loop above — it is the matched reference schema.
    map_refarg2foreacharg, map_name2arg = {}, {}
    for i, (arg, ref_arg) in enumerate(
        zip(
            foreach_function.func.arguments.flat_non_out,
            function_schema.arguments.flat_non_out,
        )
    ):
        map_refarg2foreacharg[ref_arg.name] = arg.name
        map_name2arg[arg.name] = arg

    all_saved_inputs, all_saved_outputs, all_var_names = [], [], []
    modified_derivative_formulas = []
    # Rewrite each backward derivative of the reference function so that it
    # indexes into the foreach lists (e.g. `grad` -> `grads[i]`).
    for i, derivative in enumerate(ref_diff_info.derivatives):
        # Plain textual substitution — assumes "grad"/"result" only appear as
        # those identifiers in the formula (TODO confirm: substring matches
        # inside longer identifiers would also be rewritten here).
        modified_formula = derivative.formula.replace("grad", "grads[i]").replace(
            "result", "result[i]"
        )
        saved_inputs, saved_outputs = [], []
        # note(crcrpar): This context seems necessary to call `cpp.argument_type`
        with local.parametrize(
            use_const_ref_for_mutable_tensors=foreach_function.use_const_ref_for_mutable_tensors,
            use_ilistref_for_tensor_lists=foreach_function.part_of_structured_group,
        ):
            for ref_input in derivative.saved_inputs:
                # The leading identifier of the saved expression, e.g.
                # "self" in "self.scalar_type()".
                ref_input_jit_name = ref_input.expr.split(".")[0]
                mapped_name = map_refarg2foreacharg[ref_input_jit_name]
                if isinstance(map_name2arg[mapped_name].type, ListType):
                    # List arguments are indexed per foreach element.
                    mapped_expr = mapped_name + "[i]"
                else:
                    mapped_expr = mapped_name
                new_expr = ref_input.expr.replace(ref_input_jit_name, mapped_expr)
                modified_formula = modified_formula.replace(
                    cast(str, ref_input.nctype.name), new_expr
                )

                nctype = cpp.argument_type(map_name2arg[mapped_name], binds=mapped_name)
                canonical_nctype = NamedCType(
                    nctype.name, nctype.type.remove_const_ref()
                )
                saved_inputs.append(
                    SavedAttribute(nctype=canonical_nctype, expr=mapped_name)
                )
            for ref_output in derivative.saved_outputs:
                if ref_output.nctype.name == "result":
                    # The foreach result is a TensorList rather than a Tensor.
                    saved_outputs.append(
                        SavedAttribute(
                            nctype=NamedCType(
                                name="result", type=BaseCType(tensorListT)
                            ),
                            expr="result",
                        )
                    )
                else:
                    # Only "result" saved outputs are supported for foreach.
                    raise RuntimeError("")
        var_names = [map_refarg2foreacharg[var] for var in derivative.var_names]
        all_var_names.extend(var_names)
        all_saved_inputs.extend(saved_inputs)
        all_saved_outputs.extend(saved_outputs)
        modified_derivative = Derivative(
            formula=modified_formula,
            original_formula=derivative.formula,
            var_names=tuple(var_names),
            saved_inputs=tuple(saved_inputs),
            saved_outputs=tuple(saved_outputs),
            named_gradients=set(),
        )
        modified_derivative_formulas.append(modified_derivative)

    # Bindings for the foreach arguments that actually receive gradients.
    with local.parametrize(
        use_const_ref_for_mutable_tensors=foreach_function.use_const_ref_for_mutable_tensors,
        use_ilistref_for_tensor_lists=foreach_function.part_of_structured_group,
    ):
        args_with_derivatives = [
            Binding(
                name=arg.name,
                nctype=cpp.argument_type(arg, binds=arg.name),
                argument=arg,
                default=None,
            )
            for arg in foreach_function.func.arguments.flat_non_out
            if arg.name in all_var_names
        ]

    # Rewrite forward derivatives analogously.
    forward_derivatives: List[ForwardDerivative] = []
    fw_derivative: ForwardDerivative
    for fw_derivative in ref_diff_info.forward_derivatives:
        var_names: List[str] = list(fw_derivative.var_names)  # type: ignore[no-redef]
        var_types: List[Type] = list(fw_derivative.var_types)
        required_inputs_fw_grad: List[str] = []
        required_inputs_primal: List[str] = []
        if fw_derivative.required_inputs_fw_grad is not None:
            required_inputs_fw_grad = list(fw_derivative.required_inputs_fw_grad)
        if fw_derivative.required_inputs_primal:
            required_inputs_primal = list(fw_derivative.required_inputs_primal)
        modified_formula = fw_derivative.formula

        # Foreach's result is TensorList
        if "result" in modified_formula:
            modified_formula = fw_derivative.formula.replace("result", "result[i]")

        for foreach_arg, ref_arg in zip(
            foreach_function.func.arguments.flat_non_out,
            ref_diff_info.func.func.arguments.flat_non_out,
        ):
            # Modify reference forward formula
            if (
                isinstance(foreach_arg.type, ListType)
                and not foreach_arg.type.is_tensor_like()
            ):
                # Assuming ScalarList
                modified_formula = modified_formula.replace(
                    ref_arg.name, foreach_arg.name + "[i]"
                )
            elif foreach_arg.type.is_tensor_like():
                # Assuming TensorList / Tensor
                # assert isinstance(foreach_arg.type, ListType), f"{foreach_function.func.name}, {foreach_arg.type}"
                assert isinstance(foreach_arg.type, ListType) or (
                    foreach_arg.type == BaseType(BaseTy.Tensor)
                    and str(foreach_function.func.name) in _foreach_with_tensor_overload
                ), f"{foreach_function.func.name}, {foreach_arg.type}"
                # Rename primal/tangent variables (e.g. `self_p`, `self_t`).
                for suffix in ("_p", "_t"):
                    curr_expr = ref_arg.name + suffix
                    if curr_expr in modified_formula:
                        new_expr = foreach_arg.name + suffix
                        modified_formula = modified_formula.replace(curr_expr, new_expr)
            else:
                # Assuming Scalar
                if foreach_arg.name != ref_arg.name:
                    modified_formula = modified_formula.replace(
                        ref_arg.name, foreach_arg.name
                    )

            # note(crcrpar): there should exist a cooler way...
            for i, name in enumerate(var_names):
                if name == ref_arg.name:
                    var_names[i] = foreach_arg.name
                    var_types[i] = foreach_arg.type
            for i, name in enumerate(required_inputs_fw_grad):
                if name == ref_arg.name:
                    required_inputs_fw_grad[i] = foreach_arg.name
            for i, name in enumerate(required_inputs_primal):
                if name == ref_arg.name:
                    required_inputs_primal[i] = foreach_arg.name
        forward_derivatives.append(
            ForwardDerivative(
                formula=modified_formula,
                var_names=tuple(var_names),
                var_types=tuple(var_types),
                required_inputs_fw_grad=tuple(required_inputs_fw_grad),
                required_inputs_primal=tuple(required_inputs_primal),
                required_original_self_value=fw_derivative.required_original_self_value,
                is_reusing_outplace_formula=fw_derivative.is_reusing_outplace_formula,
            )
        )

    return (
        DifferentiabilityInfo(
            name=foreach_function.func.name.name.base,
            func=foreach_function,
            op=f"Foreach{ref_diff_info.op}{foreach_function.func.name.overload_name}",
            derivatives=modified_derivative_formulas,
            forward_derivatives=forward_derivatives,
            all_saved_inputs=tuple(set(all_saved_inputs)),
            all_saved_outputs=tuple(set(all_saved_outputs)),
            available_named_gradients=(),
            used_named_gradients=set(),
            args_with_derivatives=args_with_derivatives,
            non_differentiable_arg_names=[],
            output_differentiability=None,
            output_differentiability_conditions=None,
        ),
        True,
    )
|
563 |
+
|
564 |
+
|
565 |
+
def match_differentiability_info(
    native_functions: List[NativeFunction],
    differentiability_infos: Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]],
) -> List[NativeFunctionWithDifferentiabilityInfo]:
    """Sets the "derivative" key on declarations to matching autograd function
    In-place functions will use the out-of-place derivative definition if there
    is no in-place specific derivative.
    """

    # Index infos by canonical signature so in-place/out variants can be
    # matched against the functional variant's derivative.
    functional_info_by_signature = {
        schema.signature(strip_default=True): info_dict
        for schema, info_dict in differentiability_infos.items()
        if schema.kind() == SchemaKind.functional
    }
    non_functional_info_by_signature = {
        schema.signature(strip_default=True): info_dict
        for schema, info_dict in differentiability_infos.items()
        if schema.kind() != SchemaKind.functional
    }

    def find_info(
        f: NativeFunction,
    ) -> Tuple[Optional[Dict[str, DifferentiabilityInfo]], bool]:
        # Returns (matched info dict or None, whether the match was "exact"
        # — either declared directly for this schema or generated here).
        # Don't bother matching info to generated out= variants
        if "generated" in f.tags and f.func.kind() == SchemaKind.out:
            return None, False

        # (1) Check for an exact match
        if f.func in differentiability_infos:
            return differentiability_infos[f.func], True

        # (2) If no exact match, check if the out-of-place variant
        # of this operator has a match.
        # i.e mul() for mul_() or mul_out()
        # note(crcrpar): Check foreach or not because in-place foreach functions use backward defined for the existing
        # native functions instead of the out-place counterparts.
        f_sig = f.func.signature(strip_default=True)
        if f_sig in functional_info_by_signature and not is_foreach_func(f):
            return functional_info_by_signature[f_sig], False

        # (3) Some operators have a derivative explicitly defined for the mutable
        # variant, but get a code-generated out-of-place variant which does *not*
        # come with a derivative formula.
        # For the generated out-of-place variant, use the mutable variant's formula
        # if it exists.
        if "generated" in f.tags and f_sig in non_functional_info_by_signature:
            info_dict = non_functional_info_by_signature[f_sig]
            # See https://github.com/pytorch/pytorch/pull/76320/files#r874816389
            assert not any(
                any("self" in str(inpt.nctype.name) for inpt in info.all_saved_inputs)
                for info in info_dict.values()
            ), f"""\
Attempted to convert a derivative formula for a mutable operator
to be used by automatically by its functional variant ("{str(f.func)}").
this is not currently supported (we'd need to fix up the formula in the codegen)."""
            return info_dict, False

        # (4) Generate derivative information of foreach functions if none is defined in `derivatives.yaml`
        if is_foreach_func(f):
            assert f.func not in differentiability_infos
            diff_info, is_generated = gen_foreach_derivativeinfo(
                f,
                functional_info_by_signature,
                non_functional_info_by_signature,
            )
            if diff_info is None:
                return None, False
            # TODO(crcrpar): Avoid hard coding "Default" ideally.
            diff_info_dict = {"Default": diff_info}
            if is_generated:
                # NOTE: intentionally mutates the caller-supplied mapping so
                # later lookups (e.g. for the in-place variant) can reuse the
                # generated info.
                differentiability_infos[f.func] = diff_info_dict
                functional_info_by_signature[f.func] = diff_info_dict
            return diff_info_dict, is_generated

        return None, False

    result: List[NativeFunctionWithDifferentiabilityInfo] = []
    for f in native_functions:
        info_dict, is_exact_match = find_info(f)

        # Currently, the '.strides()' to 'strides_or_error' replacement does not support
        # 'self' derivatives of an inplace function, so we must check for this case.
        if f.func.kind() == SchemaKind.inplace and (info_dict is not None):
            for info in info_dict.values():
                for derivative in info.derivatives:
                    if "self" in derivative.var_names:
                        for saved_input in derivative.saved_inputs:
                            assert "strides_or_error" not in saved_input.expr, (
                                "Calling '.strides()' in the 'self' derivative formula of an "
                                f"in-place function is not supported: {f.func}"
                            )

        if not info_dict:
            result.append(
                NativeFunctionWithDifferentiabilityInfo(
                    func=f, info=None, fw_derivatives=None
                )
            )
            continue

        fw_derivative_dict: Dict[str, Sequence[ForwardDerivative]] = {}
        for key, info in info_dict.items():
            if not info.forward_derivatives:
                fw_derivative_dict[key] = []
                continue

            forward_derivatives = info.forward_derivatives

            # For functions that have a single def for out-of-place and inplace (like abs())
            if f.func.kind() == SchemaKind.inplace:
                # For inplace functions there is a little bit of work to do:
                # 1) Validate the formula and make sure the input that is modified in not used:
                #    - If there is a formula for the inplace variant of the function (is_exact_match == True) then
                #      we make sure that the original value of the input that is being modified inplace (self_p) is
                #      not used in the formula. Note that the formula can use "original_self_p" here and that would
                #      trigger a clone of the original input.
                #    - If we are re-using the out of place formula (is_exact_match == False) then we replace every
                #      occurrence of self_p and self_t by original_self_p and original_self_t. These will be
                #      populated by cloned version of the original input (either the clone done by the backward AD
                #      logic if self is also used in a backward formula or a special clone that we add).
                # 2) At this point, there cannot be a self_p in the formula.
                # 3) Change "result" into "self_p" as by design, in the inplace function codegen, the result is
                #    simply called self (as it is modified inplace).
                # 4) Update the required primals data in case it used to contain "result" but should now contain
                #    "self"
                # 5) If it is not an exact match, the user formula is not modifying the existing forward grad
                #    inplace as it should. So add some code that makes sure that we do so if the forward grad
                #    already exists.

                assert (
                    len(info.forward_derivatives) == 1
                )  # Only single output inplace should exist
                fw_info = info.forward_derivatives[0]
                formula = fw_info.formula

                def replace_self_with_original_self(formula: str, postfix: str) -> str:
                    # Rewrite `self{postfix}` identifiers to
                    # `original_self{postfix}`, preserving the surrounding
                    # non-identifier characters captured by IDENT_REGEX.
                    def repl(m: Match[str]) -> str:
                        return f"{m.group(1)}original_self{postfix}{m.group(2)}"

                    return re.sub(IDENT_REGEX.format(f"self{postfix}"), repl, formula)

                if re.search(IDENT_REGEX.format("self_p"), formula):
                    if is_exact_match:
                        # For manually defined formulas, don't allow the original value to be used
                        raise RuntimeError(
                            f'The formula for "{f.func.name}" is using the original value of self '
                            "that is being modified inplace. This would lead to wrong forward gradients. "
                            'Please use "result" in the formula only.'
                        )
                    else:
                        # When the original formula is out of place, we save a clone of the primal
                        # value to be able to access this value if needed
                        # replace "self_p"/"self_t" from the formula by "original_self_p"/"original_self_t"
                        formula = replace_self_with_original_self(formula, "_p")
                        formula = replace_self_with_original_self(formula, "_t")

                # replace "result" from the formula by "self_p"
                def repl(m: Match[str]) -> str:
                    return f"{m.group(1)}self_p{m.group(2)}"

                formula = re.sub(IDENT_REGEX.format("result"), repl, formula)

                required_primals = fw_info.required_inputs_primal
                if re.search(IDENT_REGEX.format("self_p"), formula):
                    required_primals = (
                        required_primals + ("self",) if required_primals else ("self",)
                    )

                if not is_exact_match:
                    # NOTE [In-place forward AD formula Optimization]
                    #
                    # This optimization transforms the formula to directly do inplace, i.e.
                    # instead of self_t.copy_(self_t.op()) we do self_t.op_() when the following are met:
                    #
                    # 1) the formula satisfies the pattern: "self_t.op(*args)"
                    # 2) "op" in (1) needs to be the same as the op the derivative is for
                    #
                    # (2) may seem too strict, but currently the only ops that satisfy (1) also satisfy (2)
                    # If there is a need, we can relax (2) to allow any op that has an in-place variant
                    is_single_method_on_self_t = False
                    directly_do_inplace = False
                    op_name: Optional[str] = None
                    between_parens: Optional[str] = None
                    match = re.fullmatch(r"self_t.([\w]*)\((.*)\)", formula)
                    if match:
                        op_name, between_parens = match.group(1), match.group(2)

                        # We want to...
                        # Match: self_t.op1(other_p.op2(arg))
                        # Avoid: self_t.op1(args) + self_t.op2(args)
                        # Avoid: self_t.op1(other_p.op2(arg)) + self_t.op2(args)
                        def check_parens_nest_level_gt_zero(s: str) -> bool:
                            # True iff the parens in `s` never close the
                            # enclosing call, i.e. the whole formula is one
                            # single method call on self_t.
                            level = 1
                            for ch in s:
                                if ch == ")":
                                    level -= 1
                                    if level == 0:
                                        return False
                                if ch == "(":
                                    level += 1
                            return True

                        is_single_method_on_self_t = check_parens_nest_level_gt_zero(
                            between_parens
                        )
                        directly_do_inplace = (
                            is_single_method_on_self_t and op_name == info.name
                        )

                    if directly_do_inplace:
                        assert op_name is not None
                        assert between_parens is not None
                        formula = f"self_t_raw.defined() ? self_t_raw.{op_name}_({between_parens}) : {formula}"
                    else:
                        # Make sure that the forward grad is modified inplace when the original formula
                        # is out of place
                        formula = f"self_t_raw.defined() ? self_t_raw.copy_({formula}) : {formula}"

                required_original_self_value = bool(
                    re.search(IDENT_REGEX.format("original_self_p"), formula)
                ) or bool(re.search(IDENT_REGEX.format("original_self_t"), formula))

                forward_derivatives = [
                    ForwardDerivative(
                        formula=formula,
                        var_names=("self",),
                        var_types=fw_info.var_types,
                        required_inputs_fw_grad=fw_info.required_inputs_fw_grad,
                        required_inputs_primal=required_primals,
                        required_original_self_value=required_original_self_value,
                        is_reusing_outplace_formula=not is_exact_match,
                    ),
                ]

            fw_derivative_dict[key] = forward_derivatives

        result.append(
            NativeFunctionWithDifferentiabilityInfo(
                func=f, info=info_dict, fw_derivatives=fw_derivative_dict
            )
        )

    return result
|
808 |
+
|
809 |
+
|
810 |
+
def is_differentiable(
    name: str, type: Type, info: Optional[DifferentiabilityInfo]
) -> bool:
    """Return True when the value ``name`` of type ``type`` can carry a gradient.

    Only tensor-like values are differentiable, and a matched
    DifferentiabilityInfo may explicitly mark a value as non-differentiable.
    """
    if not type.is_tensor_like():
        return False
    return info is None or name not in info.non_differentiable_arg_names
|
816 |
+
|
817 |
+
|
818 |
+
def gen_differentiable_outputs(
    fn: NativeFunctionWithDifferentiabilityInfo, key: str = "Default"
) -> List[DifferentiableOutput]:
    """Return the outputs of ``fn`` that participate in autograd under ``key``.

    When the matched info carries an explicit ``output_differentiability``
    mask, it is validated and applied; otherwise outputs are filtered by
    `is_differentiable`, truncated to the first one if the backward formula
    only consumes a single grad.
    """
    f = fn.func
    info = fn.info[key] if fn.info else None
    outputs: List[DifferentiableOutput] = [
        DifferentiableOutput(
            name=name,
            type=ret.type,
            cpp_type=cpp.return_type(ret, symint=True).cpp_type(),
        )
        for name, ret in zip(cpp.return_names(f), f.func.returns)
    ]
    output_differentiability = info.output_differentiability if info else None
    if output_differentiability is not None:
        # Explicit per-output mask from derivatives.yaml: validate then apply.
        if len(output_differentiability) != len(outputs):
            raise RuntimeError(
                f"The length of output_differentiability ({len(output_differentiability)}), "
                f"does not match the number of outputs ({len(outputs)})."
            )
        if False in output_differentiability and f.func.kind() == SchemaKind.inplace:
            raise RuntimeError(
                "output_differentiability=False for inplace operation (version_counter won't get updated)"
            )
        return [
            output
            for differentiable, output in zip(output_differentiability, outputs)
            if differentiable
        ]
    # No explicit mask: keep tensor-like outputs not marked non-differentiable.
    candidate_differentiable_outputs = [
        out for out in outputs if is_differentiable(out.name, out.type, info)
    ]
    if uses_single_grad(info):
        return candidate_differentiable_outputs[:1]
    return candidate_differentiable_outputs
|
venv/lib/python3.10/site-packages/torchgen/api/cpp.py
ADDED
@@ -0,0 +1,467 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import List, Optional, Sequence, Set, Union
|
2 |
+
|
3 |
+
from torchgen import local
|
4 |
+
from torchgen.api.types import (
|
5 |
+
ArgName,
|
6 |
+
ArrayCType,
|
7 |
+
ArrayRefCType,
|
8 |
+
BaseCType,
|
9 |
+
BaseTypeToCppMapping,
|
10 |
+
Binding,
|
11 |
+
boolT,
|
12 |
+
ConstRefCType,
|
13 |
+
CType,
|
14 |
+
dimnameListT,
|
15 |
+
intArrayRefT,
|
16 |
+
iTensorListRefT,
|
17 |
+
ListCType,
|
18 |
+
longT,
|
19 |
+
MutRefCType,
|
20 |
+
NamedCType,
|
21 |
+
OptionalCType,
|
22 |
+
optionalIntArrayRefT,
|
23 |
+
optionalSymIntArrayRefT,
|
24 |
+
scalarT,
|
25 |
+
SpecialArgName,
|
26 |
+
symIntArrayRefT,
|
27 |
+
SymIntT,
|
28 |
+
tensorListT,
|
29 |
+
tensorOptionsT,
|
30 |
+
tensorT,
|
31 |
+
TupleCType,
|
32 |
+
VectorCType,
|
33 |
+
voidT,
|
34 |
+
)
|
35 |
+
from torchgen.model import (
|
36 |
+
Argument,
|
37 |
+
Arguments,
|
38 |
+
BaseTy,
|
39 |
+
BaseType,
|
40 |
+
FunctionSchema,
|
41 |
+
ListType,
|
42 |
+
NativeFunction,
|
43 |
+
OptionalType,
|
44 |
+
Return,
|
45 |
+
SelfArgument,
|
46 |
+
TensorOptionsArguments,
|
47 |
+
Type,
|
48 |
+
)
|
49 |
+
from torchgen.utils import assert_never
|
50 |
+
|
51 |
+
# This file describes the translation of JIT schema to the public C++
|
52 |
+
# API, which is what people use when they call functions like at::add.
|
53 |
+
#
|
54 |
+
# Prominent characteristics of the C++ API:
|
55 |
+
#
|
56 |
+
# - dtype, layout, device and pin_memory are collected into
|
57 |
+
# a single C++ type TensorOptions (the native functions API
|
58 |
+
# also has this, but tensor options is really most relevant
|
59 |
+
# for the C++ API; it makes calling kwarg factory functions
|
60 |
+
# pleasant)
|
61 |
+
#
|
62 |
+
# - defaulting lives here (in fact, the dispatcher is completely
|
63 |
+
# oblivious of defaults!)
|
64 |
+
#
|
65 |
+
# BTW: policy on name collisions: we try not to have types with
|
66 |
+
# collisions, but functions are fair game to collide
|
67 |
+
|
68 |
+
|
69 |
+
def name(
    func: FunctionSchema,
    *,
    faithful_name_for_out_overloads: bool = False,
    symint_overload: bool = False,
) -> str:
    """Compute the public C++ API name for ``func``.

    SymInt overloads get a ``_symint`` suffix; out= functions get ``_out``
    (or ``_outf`` for the faithful, all-positional overload).
    """
    result = str(func.name.name)
    if symint_overload:
        result += "_symint"
    if func.is_out_fn():
        result += "_outf" if faithful_name_for_out_overloads else "_out"
    return result
|
85 |
+
|
86 |
+
|
87 |
+
# Translation of "value types" in JIT schema to C++ API type. Value
|
88 |
+
# types look the same no matter if they are argument types or return
|
89 |
+
# types. Returns None if the type in question is not a value type.
|
90 |
+
def valuetype_type(
    t: Type,
    *,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
    symint: bool = False,
) -> Optional[NamedCType]:
    """Translate a JIT "value type" to its C++ type, or return None if `t`
    is not a value type (Tensor, Scalar, and most lists are reference-ish
    and handled by the argument/return translations instead).
    """
    if isinstance(t, BaseType):
        # Tensor and Scalar are not value types.
        if t.name == BaseTy.Tensor or t.name == BaseTy.Scalar:
            return None
        # SymInt must be checked before the generic mapping: it lowers to
        # either SymInt or int64_t depending on the `symint` flag.
        elif str(t) == "SymInt":
            if symint:
                return NamedCType(binds, BaseCType(SymIntT))
            else:
                return NamedCType(binds, BaseCType(longT))
        if remove_non_owning_ref_types:
            if t.name == BaseTy.str:
                raise AssertionError(
                    "string ref->value conversion: not implemented yet"
                )
        # All other BaseType currently map directly to BaseCppTypes.
        return NamedCType(binds, BaseCType(BaseTypeToCppMapping[t.name]))
    elif isinstance(t, OptionalType):
        # Optional of a value type is c10::optional of its translation.
        elem = valuetype_type(t.elem, binds=binds, symint=symint)
        if elem is None:
            return None
        return NamedCType(binds, OptionalCType(elem.type))
    elif isinstance(t, ListType):
        # Only fixed-size bool lists are value types (std::array<bool, N>).
        if str(t.elem) == "bool":
            assert t.size is not None
            return NamedCType(binds, ArrayCType(BaseCType(boolT), t.size))
        else:
            return None
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")
|
125 |
+
|
126 |
+
|
127 |
+
# Translation of types occurring in JIT arguments to a C++ argument type.
|
128 |
+
# If remove_non_owning_ref_types is set, we'll guarantee that the outputed CType is not a non-owning reference type.
|
129 |
+
# For example, we'll return std::vector<int> instead of IntArrayRef.
|
130 |
+
# See Note [translation from C++ reference to value types]
|
131 |
+
def argumenttype_type(
    t: Type,
    *,
    mutable: bool,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
    symint: bool = False,
) -> NamedCType:
    """Translate a JIT argument type `t` into the C++ API argument type.

    `mutable` selects mutable-reference vs const-reference Tensor passing;
    `remove_non_owning_ref_types` swaps ArrayRef-style types for owning
    std::vector equivalents; `symint` keeps SymInt types symbolic instead
    of lowering them to int64_t.
    """
    # If it's a value type, do the value type translation
    r = valuetype_type(
        t,
        binds=binds,
        symint=symint,
        remove_non_owning_ref_types=remove_non_owning_ref_types,
    )
    if r is not None:
        return r

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            if mutable and not local.use_const_ref_for_mutable_tensors():
                return NamedCType(binds, MutRefCType(BaseCType(tensorT)))
            else:
                return NamedCType(binds, ConstRefCType(BaseCType(tensorT)))
        elif t.name == BaseTy.Scalar:
            return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
        else:
            # Every other BaseType is a value type and was handled above.
            raise AssertionError(f"base type should have been value type {t}")
    elif isinstance(t, OptionalType):
        if str(t.elem) == "Tensor":
            if mutable and not local.use_const_ref_for_mutable_tensors():
                return NamedCType(
                    binds, MutRefCType(BaseCType(tensorT))
                )  # TODO: fix this discrepancy
            else:
                return NamedCType(
                    binds, ConstRefCType(OptionalCType(BaseCType(tensorT)))
                )
        elif str(t.elem) == "Scalar":
            return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT))))
        elif isinstance(t.elem, ListType) and str(t.elem.elem) == "int":
            return NamedCType(binds, BaseCType(optionalIntArrayRefT))
        elif isinstance(t.elem, ListType) and str(t.elem.elem) == "SymInt":
            if symint:
                return NamedCType(binds, BaseCType(optionalSymIntArrayRefT))
            else:
                return NamedCType(binds, BaseCType(optionalIntArrayRefT))
        # Fallthrough: optional of anything else wraps the recursive
        # translation in c10::optional.
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds, symint=symint)
        return NamedCType(binds, OptionalCType(elem.type))
    elif isinstance(t, ListType):
        # TODO: remove these special cases, ArrayRef fallthrough works fine
        if str(t.elem) == "int":
            if remove_non_owning_ref_types:
                return NamedCType(binds, VectorCType(BaseCType(longT)))
            else:
                return NamedCType(binds, BaseCType(intArrayRefT))
        if str(t.elem) == "SymInt":
            if remove_non_owning_ref_types:
                if symint:
                    return NamedCType(binds, VectorCType(BaseCType(SymIntT)))
                else:
                    return NamedCType(binds, VectorCType(BaseCType(longT)))
            else:
                if symint:
                    return NamedCType(binds, BaseCType(symIntArrayRefT))
                else:
                    return NamedCType(binds, BaseCType(intArrayRefT))
        if str(t.elem) == "Tensor":
            if local.use_ilistref_for_tensor_lists():
                return NamedCType(binds, ConstRefCType(BaseCType(iTensorListRefT)))
            else:
                return NamedCType(binds, BaseCType(tensorListT))
        elif str(t.elem) == "Scalar":
            return NamedCType(binds, ArrayRefCType(BaseCType(scalarT)))
        elif str(t.elem) == "Dimname":
            return NamedCType(binds, BaseCType(dimnameListT))
        elif str(t.elem) == "Tensor?":
            return NamedCType(
                binds, ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT))))
            )
        # Fallthrough: list of anything else becomes ArrayRef of the
        # recursive translation.
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds, symint=symint)
        return NamedCType(binds, ArrayRefCType(elem.type))
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")
|
215 |
+
|
216 |
+
|
217 |
+
# Translate a JIT argument into its C++ type
def argument_type(a: Argument, *, binds: ArgName, symint: bool = False) -> NamedCType:
    """Compute the C++ API type for a single JIT argument.

    Mutability is taken from the argument's write annotation; everything else
    is delegated to argumenttype_type.
    """
    jit_type = a.type
    return argumenttype_type(jit_type, mutable=a.is_write, binds=binds, symint=symint)
|
220 |
+
|
221 |
+
|
222 |
+
# Translation of a (non-multi) return type from JIT to C++
# N.B: returntype_type returns a CType, not a NamedCType.
# This is mostly because of the mismatch between return types and return names.
# e.g. a function with a return type of 'void' has 0 return names,
# and a function with a return type of 'std::tuple' has >1 return name.
def returntype_type(t: Type, *, mutable: bool, symint: bool = False) -> CType:
    """Translate a single JIT return type into its C++ CType."""
    # placeholder is ignored
    # NB: symint is ALWAYS respected for return types. So symint argument
    # here is IGNORED
    # Simple value types (int, bool, ScalarType, ...) are handled uniformly
    # by valuetype_type; only tensor-ish / container returns fall through.
    r = valuetype_type(t, binds="__placeholder__", symint=True)
    if r is not None:
        return r.type

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            if mutable:
                # Mutable (aliasing) Tensor returns are references; which
                # flavor is a file-wide codegen policy read from `local`.
                if local.use_const_ref_for_mutable_tensors():
                    return ConstRefCType(BaseCType(tensorT))
                else:
                    return MutRefCType(BaseCType(tensorT))
            else:
                # Note [Tensor Copy Returns]
                # Currently, we use "Argument.is_write" to determine
                # whether or not Tensor return types should be copies or references.
                # If that ever changes, take a look at other locations of this note!
                return BaseCType(tensorT)
        elif t.name == BaseTy.Scalar:
            return BaseCType(scalarT)
    elif isinstance(t, ListType):
        assert (
            not mutable
        ), "Native functions should never return a mutable tensor list. They should return void."
        # List returns are always owning vectors of the element's return type.
        elem = returntype_type(t.elem, mutable=False)
        assert t.size is None, f"fixed size list returns not supported: {t}"
        return VectorCType(elem)
    elif isinstance(t, OptionalType):
        elem = returntype_type(t.elem, mutable=mutable)
        # Only Tensor? is supported as an optional return; anything else
        # falls through to the assertion below.
        if str(t.elem) == "Tensor":
            return OptionalCType(elem)

    raise AssertionError(f"unrecognized return type {t}")
|
263 |
+
|
264 |
+
|
265 |
+
# Translation of a single return to its C++ type
def return_type(r: Return, *, symint: bool = False) -> CType:
    """C++ type of one JIT return; mutability follows the return's write annotation."""
    is_mutable = r.is_write
    return returntype_type(r.type, mutable=is_mutable, symint=symint)
|
268 |
+
|
269 |
+
|
270 |
+
# Translation of a full (possibly multi) return from JIT to its C++ type
def returns_type(rs: Sequence[Return], *, symint: bool = False) -> CType:
    """Map a whole return list to one C++ type.

    No returns -> void; one return -> its own type; several -> std::tuple.
    """
    if not rs:
        return BaseCType(voidT)
    if len(rs) == 1:
        return return_type(rs[0], symint=symint)
    return TupleCType([return_type(r, symint=symint) for r in rs])
|
278 |
+
|
279 |
+
|
280 |
+
def return_names(f: NativeFunction, *, fallback_name: str = "result") -> Sequence[str]:
    """Pick a C++ name for each return of `f`.

    Precedence: inplace functions return "self"; out functions reuse the out
    argument names; explicitly named returns keep their name (suffixed with
    "_return" on a clash with an argument); otherwise fallback_name (indexed
    when there are multiple returns).
    """
    returns: List[str] = []
    for i, r in enumerate(f.func.returns):
        # If we have an inplace function, the return argument is
        # implicitly named self.
        # TODO: Consider incorporating this into the data model
        if f.func.name.name.inplace:
            assert i == 0, "illegal inplace function with multiple returns"
            name = "self"
        # If we are out function, the name is the name of the
        # corresponding output function (r.name will get recorded
        # in field_name later.)
        elif f.func.is_out_fn():
            name = f.func.arguments.out[i].name
        # If the return argument is explicitly named...
        elif r.name:
            # Avoid colliding with a parameter of the same name in the
            # generated signature.
            name_conflict = any(
                r.name == a.name for a in f.func.schema_order_arguments()
            )
            if name_conflict and not f.func.is_out_fn():
                name = f"{r.name}_return"
            else:
                name = r.name
        # If there is no explicit name and no fallback name was passed in, we just name the output result,
        # unless it's a multi-return, in which case it's result0,
        # result1, etc (zero-indexed)
        else:
            name = fallback_name if len(f.func.returns) == 1 else f"{fallback_name}{i}"
        returns.append(name)
    return returns
|
310 |
+
|
311 |
+
|
312 |
+
# Translation table from JIT-schema default-value spellings to their C++
# equivalents. Used by default_expr as the final fallback; spellings not
# listed here pass through unchanged.
JIT_TO_CPP_DEFAULT = {
    "False": "false",
    "True": "true",
    "None": "c10::nullopt",  # UGH this one is type directed
    "Mean": "at::Reduction::Mean",
    "[]": "{}",
    "contiguous_format": "MemoryFormat::Contiguous",
    "long": "at::kLong",
}
|
321 |
+
|
322 |
+
|
323 |
+
# Convert a JIT default into C++ expression representing the default
def default_expr(d: str, t: Type, *, symint: bool) -> str:
    """Render a JIT schema default `d` as a C++ expression for type `t`.

    Handles string-quote conversion, optionals, list defaults and symint
    defaults; anything else goes through the JIT_TO_CPP_DEFAULT table.
    """
    if d == "None" and str(t) == "Tensor?":
        return "{}"
    if isinstance(t, BaseType) and t.name is BaseTy.str:
        # Schema allows single quotes but C++ needs double
        if len(d) >= 2 and d[0] == "'" and d[-1] == "'":
            s = ""
            i = 1
            # Walk the characters between the surrounding single quotes,
            # re-escaping double quotes and unescaping \' sequences.
            while i + 1 < len(d):
                if d[i] != "\\":
                    if d[i] == '"':
                        s += '\\"'
                    else:
                        s += d[i]
                    i += 1
                else:
                    if d[i + 1] == "'":
                        s += "'"
                    else:
                        # Preserve any other escape sequence verbatim.
                        s += d[i : i + 2]
                    i += 2

            return f'"{s}"'

    if isinstance(t, OptionalType):
        if d == "None":
            return "c10::nullopt"

        return default_expr(d, t.elem, symint=symint)

    if isinstance(t, ListType):
        if d.startswith("[") and d.endswith("]"):
            return "{" + d[1:-1] + "}"
        # NOTE(review): d.isdigit() excludes negative scalar defaults like
        # "-1"; presumably those never occur for SymInt[] defaults — confirm.
        elif symint and d.isdigit() and str(t.elem) == "SymInt":
            return f"c10::SymInt({d})"
        elif t.size is None:
            # NOTE: Sized lists can have scalar defaults
            raise ValueError(f"Expected a list default '[...]' but found: '{d}'")

    return JIT_TO_CPP_DEFAULT.get(d, d)
|
364 |
+
|
365 |
+
|
366 |
+
# Convert an argument into its C++ API form


def argument(
    a: Union[Argument, TensorOptionsArguments, SelfArgument],
    *,
    cpp_no_default_args: Set[str],
    method: bool,
    faithful: bool,
    symint: bool = False,
    has_tensor_options: bool,
) -> List[Binding]:
    """Produce the C++ binding(s) for one schema argument.

    A plain Argument yields one Binding; TensorOptionsArguments yields either
    four bindings (faithful) or a single grouped `options` binding; a
    SelfArgument yields nothing for methods (implicit this) and forwards
    otherwise.
    """
    # Recursive call with every keyword pinned to this invocation's settings.
    def sub_argument(
        a: Union[Argument, TensorOptionsArguments, SelfArgument]
    ) -> List[Binding]:
        return argument(
            a,
            cpp_no_default_args=cpp_no_default_args,
            method=method,
            faithful=faithful,
            symint=symint,
            has_tensor_options=has_tensor_options,
        )

    if isinstance(a, Argument):
        binds: ArgName
        # memory_format may duplicate information already in TensorOptions,
        # so it gets a special bind name when both are present.
        if a.name == "memory_format" and has_tensor_options:
            binds = SpecialArgName.possibly_redundant_memory_format
        else:
            binds = a.name
        default: Optional[str] = None
        if a.name not in cpp_no_default_args and a.default is not None:
            default = default_expr(a.default, a.type, symint=symint)
        return [
            Binding(
                nctype=argument_type(a, binds=binds, symint=symint),
                name=a.name,
                default=default,
                argument=a,
            )
        ]
    elif isinstance(a, TensorOptionsArguments):
        if faithful:
            # Faithful signatures keep dtype/layout/device/pin_memory separate.
            return (
                sub_argument(a.dtype)
                + sub_argument(a.layout)
                + sub_argument(a.device)
                + sub_argument(a.pin_memory)
            )
        else:
            default = None
            # Enforced by NativeFunction.__post_init__
            assert "options" not in cpp_no_default_args
            if all(x.default == "None" for x in a.all()):
                default = "{}"
            elif a.dtype.default == "long":
                default = "at::kLong"  # TODO: this is wrong
            return [
                Binding(
                    nctype=NamedCType("options", BaseCType(tensorOptionsT)),
                    name="options",
                    default=default,
                    argument=a,
                )
            ]
    elif isinstance(a, SelfArgument):
        if method:
            # Caller is responsible for installing implicit this in context!
            return []
        else:
            return sub_argument(a.argument)
    else:
        assert_never(a)
|
439 |
+
|
440 |
+
|
441 |
+
def arguments(
    arguments: Arguments,
    *,
    faithful: bool,
    symint: bool = False,
    method: bool,
    cpp_no_default_args: Set[str],
) -> List[Binding]:
    """Flatten an Arguments group into the full list of C++ bindings.

    Faithful signatures order out arguments last and strip defaults;
    non-faithful signatures put out arguments first and keep defaults.
    """
    if faithful:
        ordered: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = (
            list(arguments.non_out) + list(arguments.out)
        )
    else:
        ordered = list(arguments.out) + list(arguments.non_out)

    bindings: List[Binding] = []
    for a in ordered:
        for b in argument(
            a,
            faithful=faithful,
            symint=symint,
            method=method,
            has_tensor_options=arguments.tensor_options is not None,
            cpp_no_default_args=cpp_no_default_args,
        ):
            bindings.append(b.no_default() if faithful else b)
    return bindings
|
venv/lib/python3.10/site-packages/torchgen/api/dispatcher.py
ADDED
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import itertools
|
2 |
+
from typing import List, Sequence, Union
|
3 |
+
|
4 |
+
from torchgen.api import cpp
|
5 |
+
|
6 |
+
from torchgen.api.types import ArgName, Binding, CType, NamedCType
|
7 |
+
from torchgen.model import (
|
8 |
+
Argument,
|
9 |
+
FunctionSchema,
|
10 |
+
Return,
|
11 |
+
SelfArgument,
|
12 |
+
TensorOptionsArguments,
|
13 |
+
Type,
|
14 |
+
)
|
15 |
+
from torchgen.utils import assert_never, concatMap
|
16 |
+
|
17 |
+
# This file describes the translation of JIT schema to the dispatcher
|
18 |
+
# API, the *unboxed* calling convention by which invocations through
|
19 |
+
# the dispatcher are made. Historically, the dispatcher API matched
|
20 |
+
# the C++ API, but with the establishment of the boxed API, we've
|
21 |
+
# made changes to the dispatcher API so that the unboxed API
|
22 |
+
# better aligns with the boxed API. The dispatcher API hooks heavily
|
23 |
+
# into our template based boxing/unboxing machinery, so changes
|
24 |
+
# to this convention will usually need template updates too.
|
25 |
+
#
|
26 |
+
# Prominent characteristics of the dispatcher API:
|
27 |
+
#
|
28 |
+
# - dtype, layout, device and pin_memory are represented as separate
|
29 |
+
# arguments.
|
30 |
+
#
|
31 |
+
|
32 |
+
|
33 |
+
def name(func: FunctionSchema) -> str:
    """The dispatcher-visible name of an operator.

    Currently identical to the C++ API name (delegates to cpp.name).
    """
    dispatcher_name = cpp.name(func)
    return dispatcher_name
|
35 |
+
|
36 |
+
|
37 |
+
def argumenttype_type(
    t: Type,
    *,
    mutable: bool,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
    symint: bool = True,
) -> NamedCType:
    """Dispatcher C++ type for a JIT type.

    A faux ami: today this is a pure pass-through to cpp.argumenttype_type.
    If special cases ever accumulate here, consider inverting the call
    direction or inlining this function entirely.
    """
    cpp_nctype = cpp.argumenttype_type(
        t,
        mutable=mutable,
        binds=binds,
        remove_non_owning_ref_types=remove_non_owning_ref_types,
        symint=symint,
    )
    return cpp_nctype
|
56 |
+
|
57 |
+
|
58 |
+
def argument_type(
    a: Argument,
    *,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
    symint: bool = True,
) -> NamedCType:
    """Dispatcher C++ type for one argument; mutability comes from its write annotation."""
    return argumenttype_type(
        a.type,
        mutable=a.is_write,
        binds=binds,
        symint=symint,
        remove_non_owning_ref_types=remove_non_owning_ref_types,
    )
|
72 |
+
|
73 |
+
|
74 |
+
def returns_type(rs: Sequence[Return], *, symint: bool = True) -> CType:
    """Dispatcher return type for a return list.

    At present identical to the C++ API's; the indirection exists in case
    they ever diverge.
    """
    return cpp.returns_type(rs, symint=symint)
|
77 |
+
|
78 |
+
|
79 |
+
def jit_arguments(func: FunctionSchema) -> List[Argument]:
    """Flatten positional, kwarg-only and out arguments into plain Arguments.

    SelfArgument contributes its wrapped argument; TensorOptionsArguments
    expands into its four components (dtype, layout, device, pin_memory).
    """
    flat: List[Argument] = []
    for a in itertools.chain(
        func.arguments.positional, func.arguments.kwarg_only, func.arguments.out
    ):
        if isinstance(a, Argument):
            flat.append(a)
        elif isinstance(a, SelfArgument):
            flat.append(a.argument)
        elif isinstance(a, TensorOptionsArguments):
            flat.extend([a.dtype, a.layout, a.device, a.pin_memory])
        else:
            assert_never(a)
    return flat
|
100 |
+
|
101 |
+
|
102 |
+
def argument(
    a: Argument, *, remove_non_owning_ref_types: bool = False, symint: bool = True
) -> Binding:
    """Produce the dispatcher Binding for a single argument."""
    nct = argument_type(
        a,
        binds=a.name,
        remove_non_owning_ref_types=remove_non_owning_ref_types,
        symint=symint,
    )
    return Binding(nctype=nct, name=a.name, argument=a)
|
115 |
+
|
116 |
+
|
117 |
+
def arguments(func: FunctionSchema, *, symint: bool = True) -> List[Binding]:
    """Dispatcher bindings for every flattened argument of `func`."""
    bindings: List[Binding] = []
    for a in jit_arguments(func):
        bindings.append(argument(a, symint=symint))
    return bindings
|
venv/lib/python3.10/site-packages/torchgen/api/functionalization.py
ADDED
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import List, Optional
|
2 |
+
|
3 |
+
from torchgen.api import dispatcher
|
4 |
+
from torchgen.api.types import (
|
5 |
+
BaseCppType,
|
6 |
+
BaseCType,
|
7 |
+
Binding,
|
8 |
+
boolT,
|
9 |
+
ConstRefCType,
|
10 |
+
CType,
|
11 |
+
longT,
|
12 |
+
NamedCType,
|
13 |
+
tensorT,
|
14 |
+
)
|
15 |
+
from torchgen.model import (
|
16 |
+
Argument,
|
17 |
+
BaseTy,
|
18 |
+
BaseType,
|
19 |
+
FunctionSchema,
|
20 |
+
NativeFunction,
|
21 |
+
NativeFunctionsViewGroup,
|
22 |
+
)
|
23 |
+
|
24 |
+
|
25 |
+
# This file describes the translation of JIT schema to API's used
|
26 |
+
# when creating view lambdas that are used by the functionalization pass.
|
27 |
+
# There are two types of lambdas: forward lambdas and reverse lambdas.
|
28 |
+
# These API's mostly follow the dispatcher API, with a few quirks:
|
29 |
+
# - The lambda capture has to convert reference types to value types
|
30 |
+
# - While the forward lambda just directly calls into the at::_ops API
|
31 |
+
# (following the dispatcher convention), the logic here for the reverse lambda
|
32 |
+
# is responsible for generating both the call-site, and the declarations
|
33 |
+
# (which are implemented manually in the at::functionalization::impl namespace).
|
34 |
+
|
35 |
+
# The lambdas generated for each view op in the functionalization pass are of the form
|
36 |
+
# [capture_arguments](outer_arguments) -> returns_type {
|
37 |
+
# return name(inner_arguments);
|
38 |
+
# }
|
39 |
+
|
40 |
+
# Define some specific lambda input arguments.
# These are the fixed outer/inner parameters shared by every generated
# functionalization lambda; the `argument` fields below are placeholders.
# NOTE(review): the underlying Argument of mutated_view_binding and
# mutated_view_idx_binding is also named "base" — presumably the Argument is
# never inspected for these bindings; confirm before relying on it.
# The original tensor the view was taken from.
base_binding = Binding(
    name="base",
    nctype=NamedCType(name="base", type=ConstRefCType(BaseCType(tensorT))),
    argument=Argument(
        name="base", type=BaseType(BaseTy.Tensor), default=None, annotation=None
    ),
    default=None,
)
# The (possibly updated) view tensor handed to the reverse lambda.
mutated_view_binding = Binding(
    name="mutated_view",
    nctype=NamedCType(name="mutated_view", type=ConstRefCType(BaseCType(tensorT))),
    argument=Argument(
        name="base", type=BaseType(BaseTy.Tensor), default=None, annotation=None
    ),
    default=None,
)
# Index selecting one output of a multi-output view op.
mutated_view_idx_binding = Binding(
    name="mutated_view_idx",
    nctype=NamedCType(name="mutated_view_idx", type=BaseCType(longT)),
    argument=Argument(
        name="base", type=BaseType(BaseTy.Tensor), default=None, annotation=None
    ),
    default=None,
)
# Runtime flag: replay views as views (true) or as copies (false).
reapply_views_binding = Binding(
    name="reapply_views",
    nctype=NamedCType(name="reapply_views", type=BaseCType(boolT)),
    argument=Argument(
        name="reapply_views", type=BaseType(BaseTy.bool), default=None, annotation=None
    ),
    default=None,
)

# C++ enum controlling what the reverse (inverse) lambda returns.
InverseReturnModeT = BaseCppType("at::functionalization", "InverseReturnMode")
inverse_return_mode_binding = Binding(
    name="inverse_return_mode",
    nctype=NamedCType(name="inverse_return_mode", type=BaseCType(InverseReturnModeT)),
    argument=Argument(
        name="inverse_return_mode",
        # NB: not actually a bool but it doesn't matter because this isn't used
        type=BaseType(BaseTy.bool),
        default=None,
        annotation=None,
    ),
    default=None,
)
|
87 |
+
|
88 |
+
|
89 |
+
# The lambda capture itself doesn't have a name.
# The name returned here corresponds to the name of the inner function called by the lambda.
def name(
    g: NativeFunctionsViewGroup,
    *,
    is_reverse: bool,
    include_namespace: bool,
    reapply_views: Optional[bool] = None,
) -> str:
    """Name of the inner function a functionalization lambda calls.

    Reverse lambdas call the generated view-inverse function; forward lambdas
    call the at::_ops API of either the view or the view_copy variant,
    depending on `reapply_views`.
    """
    if reapply_views is None:
        # reapply_views is only important for the fwd lambda,
        # since we always plumb the runtime "reapply_views" argument into the reverse function.
        assert is_reverse
    if is_reverse:
        return reverse_name(g.view, include_namespace)
    # in the forward case, we just directly call into the at::_ops API (so we always need the namespace)
    assert include_namespace
    assert g.view_copy is not None
    api_name = (
        g.view.func.name.unambiguous_name()
        if reapply_views
        else g.view_copy.func.name.unambiguous_name()
    )
    return f"at::_ops::{api_name}::call"
|
113 |
+
|
114 |
+
|
115 |
+
def reverse_name(f: NativeFunction, include_namespace: bool) -> str:
    """Name of the view-inverse function for `f`.

    The reverse path plumbs the runtime "reapply_views" flag into one inverse
    function that supports both copy and non-copy variants (rather than
    generating two inverses per op). Call sites need the full namespace;
    the manually-written declarations in at::functionalization::impl do not.
    """
    api_name = f.func.name.unambiguous_name()
    if include_namespace:
        return f"at::functionalization::FunctionalInverses::{api_name}_inverse"
    return f"{api_name}_inverse"
|
125 |
+
|
126 |
+
|
127 |
+
def capture_arguments(func: FunctionSchema, *, is_reverse: bool) -> List[Binding]:
    """Bindings captured by the lambda: every argument except `self`.

    Reference types must not be captured (dangling reference risk), so
    non-owning types such as IntArrayRef are converted to owning value types
    (e.g. vector<int64_t>). A mode flag is prepended: inverse_return_mode for
    reverse lambdas, reapply_views for forward ones.
    """
    args = func.arguments.flat_all
    assert args[0].type == BaseType(BaseTy.Tensor)
    mode_binding = inverse_return_mode_binding if is_reverse else reapply_views_binding
    value_bindings = [
        dispatcher.argument(a, remove_non_owning_ref_types=True) for a in args[1:]
    ]
    return [mode_binding] + value_bindings
|
143 |
+
|
144 |
+
|
145 |
+
def returns_type(func: FunctionSchema) -> CType:
    """Return type of a functionalization lambda: always a single Tensor.

    All view ops return tensor-like outputs (asserted); multi-tensor outputs
    are tracked one tensor at a time, so the lambda still returns one Tensor.
    """
    assert len(func.returns) >= 1
    assert all(ret.type.is_tensor_like() for ret in func.returns)
    return BaseCType(tensorT)
|
153 |
+
|
154 |
+
|
155 |
+
def outer_arguments(*, is_reverse: bool) -> List[Binding]:
    """Parameters of the generated lambda itself.

    Both directions take (base, mutated_view_idx); reverse lambdas
    additionally take mutated_view between them.
    """
    bindings = [base_binding]
    if is_reverse:
        bindings.append(mutated_view_binding)
    bindings.append(mutated_view_idx_binding)
    return bindings
|
160 |
+
|
161 |
+
|
162 |
+
def inner_call_index(func: FunctionSchema) -> Optional[Binding]:
    """Index binding for multi-output view ops, else None.

    View ops returning several tensors (like `split`) get one lambda per
    output; replaying such an op needs an index to pick the right output.
    """
    rets = func.returns
    multi_output = len(rets) > 1 or (len(rets) == 1 and rets[0].type.is_list_like())
    return mutated_view_idx_binding if multi_output else None
|
170 |
+
|
171 |
+
|
172 |
+
def inner_arguments(func: FunctionSchema, is_reverse: bool) -> List[Binding]:
    """Arguments passed to the function the lambda calls.

    Both the forward call (at::_ops) and the reverse call (view inverse)
    follow the dispatcher API. The original `self` tensor is replaced by the
    lambda argument "base"; reverse calls additionally pass mutated_view, the
    inverse return mode, and — for multi-output view ops — an index argument.
    """
    args = func.arguments.flat_all
    assert args[0].type == BaseType(BaseTy.Tensor)
    non_self_bindings = [dispatcher.argument(a) for a in args[1:]]

    if not is_reverse:
        # forward lambda: swap the original tensor argument for "base"
        return [base_binding] + non_self_bindings

    # reverse lambda: same swap, plus mutated_view and the return-mode flag;
    # view_inverse functions for multi-output ops also take an index argument.
    prefix = [base_binding, mutated_view_binding, inverse_return_mode_binding]
    index_binding = inner_call_index(func)
    if index_binding is not None:
        prefix.append(index_binding)
    return prefix + non_self_bindings
|
venv/lib/python3.10/site-packages/torchgen/api/lazy.py
ADDED
@@ -0,0 +1,464 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
2 |
+
|
3 |
+
from torchgen.api.types import (
|
4 |
+
BaseCppType,
|
5 |
+
BaseCType,
|
6 |
+
boolT,
|
7 |
+
CType,
|
8 |
+
deviceT,
|
9 |
+
doubleT,
|
10 |
+
generatorT,
|
11 |
+
layoutT,
|
12 |
+
ListCType,
|
13 |
+
longT,
|
14 |
+
memoryFormatT,
|
15 |
+
NamedCType,
|
16 |
+
OptionalCType,
|
17 |
+
scalarT,
|
18 |
+
scalarTypeT,
|
19 |
+
stringT,
|
20 |
+
SymIntT,
|
21 |
+
VectorCType,
|
22 |
+
)
|
23 |
+
|
24 |
+
from torchgen.model import (
|
25 |
+
Argument,
|
26 |
+
BaseTy,
|
27 |
+
BaseType,
|
28 |
+
FunctionSchema,
|
29 |
+
ListType,
|
30 |
+
OperatorName,
|
31 |
+
OptionalType,
|
32 |
+
Return,
|
33 |
+
TensorOptionsArguments,
|
34 |
+
Type,
|
35 |
+
)
|
36 |
+
|
37 |
+
|
38 |
+
# Module-level registry for the backend-specific IR value type; populated by
# setValueT() and read by getValueT().
_valueT: Optional[BaseCppType] = None
|
39 |
+
|
40 |
+
|
41 |
+
# A ValueT is an IR type which represents the computation of a Tensor. In other
# words, a PyTorch user will do operations on lazy tensors, and each output lazy
# tensor internally tracks a ValueT representing the IR node that would have
# actually produced the value of this tensor for real.
#
# This is configurable because different lazy tensor backends (LTC vs XLA) will
# have different IR representations. (Though, arguably, after unification they
# shouldn't!)
def getValueT() -> BaseCppType:
    """Return the registered IR value type; raises if setValueT was never called."""
    if _valueT is None:
        raise NotImplementedError(
            "The value type needs to be set with setValueT() in run_gen_lazy_tensor()"
        )
    return _valueT
|
57 |
+
|
58 |
+
|
59 |
+
def setValueT(val: BaseCppType) -> None:
    """Register the backend-specific IR value type returned by getValueT()."""
    global _valueT
    _valueT = val
|
62 |
+
|
63 |
+
|
64 |
+
# this is a bad hack. I need to refactor the data model to represent each arg in the schema as an object,
# making it easier to represent special properties of an arg.
# Sentinel C++ type used by process_ir_type for Tensor-list arguments.
tensorListValueT = BaseCppType("torch::lazy", "Value")
|
67 |
+
|
68 |
+
|
69 |
+
def process_ir_type(
    typ: Type, properties: "LazyIrProperties", *, symint: bool
) -> Union[BaseCType, VectorCType, OptionalCType, ListCType]:
    """
    This function takes a type from NativeFunctions and converts it for use with
    lazy tensor codegen.

    Type conversion for lazy currently consists of
    (1) changing at::Tensors into lazy::Values
    (2) wrapping everything in a BaseCType
    (3) making cpp-reference types into cpp-value types (e.g. vector instead of IntArrayRef)

    (1) converts at::Tensors to lazy::Values (which wrap lazy::Nodes, with which Lazy IR represents tensors.)
    There is special handling for Optional[Tensor] or List[Tensor], etc- hence 'tensor-like'

    This is incomplete- there are assertions in places that it's expected to need to add
    more types as the codegen is used with more operators.
    """
    if isinstance(typ, BaseType):
        # Base (scalar-ish) types: tensors and (optionally) scalars/symints
        # become IR values; everything else maps to a plain C++ type.
        if typ.name == BaseTy.Tensor:
            return BaseCType(getValueT())
        elif typ.name == BaseTy.Scalar:
            if properties.TreatScalarsAsConstants:
                return BaseCType(scalarT)
            # at::scalar has special handling,
            # and is wrapped in an lazy::Value just like at::tensor
            return BaseCType(getValueT())
        elif typ.name == BaseTy.ScalarType:
            return BaseCType(scalarTypeT)
        elif typ.name == BaseTy.int:
            return BaseCType(longT)
        elif typ.name == BaseTy.SymInt:
            # Traced symbolically only when symint codegen is requested.
            if symint:
                return BaseCType(getValueT())
            else:
                return BaseCType(longT)
        elif typ.name == BaseTy.bool:
            return BaseCType(boolT)
        elif typ.name == BaseTy.float:
            return BaseCType(doubleT)
        elif typ.name == BaseTy.str:
            return BaseCType(stringT)
        elif typ.name == BaseTy.Device:
            return BaseCType(deviceT)
        elif typ.name == BaseTy.Generator:
            return BaseCType(generatorT)
        elif typ.name == BaseTy.Layout:
            return BaseCType(layoutT)
        elif typ.name == BaseTy.MemoryFormat:
            return BaseCType(memoryFormatT)
        else:
            raise AssertionError(f"TODO add support for type {repr(typ)}")
    elif isinstance(typ, OptionalType):
        # Optionals recurse on their element type.
        return OptionalCType(process_ir_type(typ.elem, properties, symint=symint))
    elif isinstance(typ, ListType):
        if str(typ.elem) == "Tensor?":
            # TODO(whc) is this actually correct? or should it use a Vector like above
            return ListCType(OptionalCType(BaseCType(getValueT())))
        elif str(typ.elem) == "Tensor":
            # this is a TensorList which comes in from GetTensorList as a Value
            return BaseCType(tensorListValueT)
        elif typ.elem == BaseType(BaseTy.SymInt):
            # TODO: return a value type. The problem here is analogous to
            # the problem with tensorListValueT: if you have SymInt[] you
            # cannot conveniently save the list of Value directly, as nodes
            # expect to save values as a vector for ALL arguments. So you
            # need a separate IR node that represents all of the size nodes
            # assembled into a list. I'm not an LTC dev so I don't want to
            # figure it out right now. Y'all figure it out...
            return VectorCType(BaseCType(longT))

        else:
            return VectorCType(process_ir_type(typ.elem, properties, symint=symint))
    else:
        raise AssertionError(f"unrecognized type {repr(typ)}")
|
144 |
+
|
145 |
+
|
146 |
+
# TODO: Determining this based off of CType is bad; this should be computed
# from Type directly; then the same logic as process_ir_type can be used
#
# Invariant: passed typ should be an *owning* CType (e.g., we will report
# that ArrayRef<Value> is NOT a value type)
def isValueType(typ: CType, properties: "Optional[LazyIrProperties]" = None) -> bool:
    """
    Given a type, determine if it is a Value-like type. This is equivalent to
    being Tensor-like, but assumes the type has already been transformed.
    """
    if isinstance(typ, BaseCType):
        # I am regretting my naming conventions, but now we are wrapping at::scalar in
        # lazy value, while preserving other 'scalar' types as scalars in the IR
        treat_scalars_as_constants = properties and properties.TreatScalarsAsConstants
        return (
            typ.type == getValueT()
            or (typ.type == scalarT and not treat_scalars_as_constants)
            or typ.type == SymIntT
        )
    elif typ == VectorCType(BaseCType(SymIntT)):
        # TODO: report True for this
        return False
    elif isinstance(typ, (OptionalCType, ListCType, VectorCType)):
        # Containers are value-like iff their element type is.
        return isValueType(typ.elem, properties)
    return False
|
171 |
+
|
172 |
+
|
173 |
+
def isSymIntType(typ: Type) -> bool:
|
174 |
+
return isinstance(typ, BaseType) and typ.name == BaseTy.SymInt
|
175 |
+
|
176 |
+
|
177 |
+
def isWrappedScalarType(typ: Type) -> bool:
|
178 |
+
"""
|
179 |
+
Given a type, determine if it is a c10::scalar which we will wrap in a lazy Value.
|
180 |
+
Since we literally change the type from scalarT to valueT, information is lost.
|
181 |
+
This function helps build a list of wrapped scalars to save that information
|
182 |
+
"""
|
183 |
+
if isinstance(typ, BaseType):
|
184 |
+
# I am regretting my naming conventions, but now we are wrapping at::scalar in
|
185 |
+
# lazy value, while preserving other 'scalar' types as scalars in the IR
|
186 |
+
return typ.name == BaseTy.Scalar
|
187 |
+
elif isinstance(typ, (OptionalType, ListType)):
|
188 |
+
return isWrappedScalarType(typ.elem)
|
189 |
+
return False
|
190 |
+
|
191 |
+
|
192 |
+
# TODO: dedupe with Type.is_generator_like
|
193 |
+
def isGeneratorType(typ: Type) -> bool:
|
194 |
+
if isinstance(typ, BaseType):
|
195 |
+
return typ.name == BaseTy.Generator
|
196 |
+
elif isinstance(typ, (OptionalType)):
|
197 |
+
return isGeneratorType(typ.elem)
|
198 |
+
return False
|
199 |
+
|
200 |
+
|
201 |
+
# This class caches a few derived properties computed from an Argument
|
202 |
+
# and LazyIrProperties
|
203 |
+
class LazyArgument:
|
204 |
+
name: str
|
205 |
+
orig_type: Type
|
206 |
+
lazy_type_: Optional[CType]
|
207 |
+
is_wrapped_scalar: bool
|
208 |
+
is_generator: bool
|
209 |
+
# TODO: this is lies, it is false for symint list
|
210 |
+
is_symint_or_list: bool
|
211 |
+
|
212 |
+
# Whether or not we are treating this as symint or not
|
213 |
+
symint: bool
|
214 |
+
|
215 |
+
# true if this argument is or contains a lazy IR value
|
216 |
+
is_lazy_value: bool
|
217 |
+
|
218 |
+
def __init__(self, arg: Argument, properties: "LazyIrProperties", *, symint: bool):
|
219 |
+
self.name = arg.name
|
220 |
+
self.orig_type = arg.type
|
221 |
+
self.symint = symint
|
222 |
+
self.is_optional = isinstance(arg.type, OptionalType)
|
223 |
+
self.is_generator = isGeneratorType(arg.type)
|
224 |
+
self.lazy_type_ = process_ir_type(arg.type, properties, symint=symint)
|
225 |
+
self.is_wrapped_scalar = isWrappedScalarType(arg.type)
|
226 |
+
self.is_symint_or_list = symint and (
|
227 |
+
isSymIntType(arg.type)
|
228 |
+
or (isinstance(arg.type, OptionalType) and isSymIntType(arg.type.elem))
|
229 |
+
# TODO: lists of symints are not currently treated as value types
|
230 |
+
# or (isinstance(arg.type, ListType) and isSymIntType(arg.type.elem))
|
231 |
+
)
|
232 |
+
|
233 |
+
self.is_lazy_value = isValueType(self.lazy_type, properties)
|
234 |
+
|
235 |
+
@property
|
236 |
+
def lazy_type(self) -> CType:
|
237 |
+
assert (
|
238 |
+
self.lazy_type_ is not None
|
239 |
+
), f"Attempted to access lazy_type for invalid argument {self.name}"
|
240 |
+
return self.lazy_type_
|
241 |
+
|
242 |
+
|
243 |
+
class LazyIrProperties:
|
244 |
+
"""Collection of properties for an IR node
|
245 |
+
|
246 |
+
The property groups are listed below. Each group is mutually
|
247 |
+
exclusive, meaning that only one property from each group can be True
|
248 |
+
at any one time. The properties can be accessed as if they were normal
|
249 |
+
attributes. The mutual exclusivity is automatically handled.
|
250 |
+
"""
|
251 |
+
|
252 |
+
Properties: Tuple[Tuple[str, ...], ...] = (
|
253 |
+
(
|
254 |
+
"ShapePrecompute", # Assume shape has been precomputed
|
255 |
+
"ShapeCompute", # Need to compute the shape on construction
|
256 |
+
"ShapeCache", # Utilize the shape cache to defer computation
|
257 |
+
),
|
258 |
+
(
|
259 |
+
"Lower", # Codegen full lower function
|
260 |
+
"LowerDeclOnly", # Codegen only lower function declaration
|
261 |
+
),
|
262 |
+
(
|
263 |
+
"CanBeReused", # Codegen full reuse function
|
264 |
+
"CanBeReusedDeclOnly", # Codegen only reuse function declaration
|
265 |
+
),
|
266 |
+
(
|
267 |
+
"CreateFn", # Codegen full create function
|
268 |
+
"CreateFnDeclOnly", # Codegen only create function declaration
|
269 |
+
),
|
270 |
+
(
|
271 |
+
"TreatScalarsAsConstants", # Treat Scalars as constants instead of handling like values
|
272 |
+
),
|
273 |
+
)
|
274 |
+
|
275 |
+
def __init__(self, *default_properties: str):
|
276 |
+
properties: Dict[Tuple[str, ...], Optional[str]] = dict.fromkeys(
|
277 |
+
LazyIrProperties.Properties
|
278 |
+
)
|
279 |
+
self.__dict__["properties"] = properties
|
280 |
+
for p in default_properties:
|
281 |
+
setattr(self, p, True)
|
282 |
+
|
283 |
+
def __getattr__(self, key: str) -> Any:
|
284 |
+
properties = self.__dict__["properties"]
|
285 |
+
for values in LazyIrProperties.Properties:
|
286 |
+
if key in values:
|
287 |
+
return properties[values] == key
|
288 |
+
|
289 |
+
return self.__getattribute__(key)
|
290 |
+
|
291 |
+
def __setattr__(self, key: str, value: Any) -> Any:
|
292 |
+
properties = self.__dict__["properties"]
|
293 |
+
for values in LazyIrProperties.Properties:
|
294 |
+
if key in values:
|
295 |
+
properties[values] = key if value else None
|
296 |
+
return value
|
297 |
+
|
298 |
+
raise KeyError(f"Invalid property: {key}")
|
299 |
+
|
300 |
+
|
301 |
+
# Inspired by a FunctionSchema object, a LazyIrSchema holds the schema of a Lazy IR node.
|
302 |
+
# Unlike a FunctionSchema, it has no round-trippable string form (relating to the YAML),
|
303 |
+
# but carries type information from a native FunctionSchema modified for use with IR nodes,
|
304 |
+
# and preserving original argument names.
|
305 |
+
#
|
306 |
+
# TODO: This is not idiomatic with how other torchgen APIs transform on schema.
|
307 |
+
class LazyIrSchema:
|
308 |
+
# The name of the operator this function schema describes.
|
309 |
+
name: "OperatorName"
|
310 |
+
|
311 |
+
positional_args: Tuple[LazyArgument, ...]
|
312 |
+
keyword_args: Tuple[LazyArgument, ...]
|
313 |
+
|
314 |
+
# TODO: Need to handle collisions with argument names at some point
|
315 |
+
returns: Tuple["Return", ...]
|
316 |
+
|
317 |
+
# if this schema has a Generator arg, list its orig ctype/name but don't
|
318 |
+
# build a LazyArgument since lazy IR doesn't support it
|
319 |
+
generator_arg: Optional[NamedCType] = None
|
320 |
+
|
321 |
+
# original function schema
|
322 |
+
func: FunctionSchema
|
323 |
+
|
324 |
+
# Whether or not we are code-genning for SymInt or not
|
325 |
+
symint: bool
|
326 |
+
|
327 |
+
properties: LazyIrProperties = LazyIrProperties(
|
328 |
+
# default properties
|
329 |
+
"ShapePrecompute",
|
330 |
+
"Lower",
|
331 |
+
"CanBeReused",
|
332 |
+
)
|
333 |
+
opkind: Optional[str] = None
|
334 |
+
|
335 |
+
def __init__(
|
336 |
+
self,
|
337 |
+
func: FunctionSchema,
|
338 |
+
properties: Optional[LazyIrProperties] = None,
|
339 |
+
*,
|
340 |
+
symint: bool,
|
341 |
+
):
|
342 |
+
if properties:
|
343 |
+
self.properties = properties
|
344 |
+
|
345 |
+
self.func = func
|
346 |
+
self.symint = symint
|
347 |
+
positional_args: List[LazyArgument] = []
|
348 |
+
for arg_field in ["pre_self_positional", "self_arg", "post_self_positional"]:
|
349 |
+
if arg_field == "self_arg" and func.arguments.self_arg is not None:
|
350 |
+
arg = func.arguments.self_arg.argument
|
351 |
+
positional_args.append(
|
352 |
+
LazyArgument(arg, self.properties, symint=symint)
|
353 |
+
)
|
354 |
+
elif getattr(func.arguments, arg_field) is not None:
|
355 |
+
positional_args.extend(
|
356 |
+
LazyArgument(arg, self.properties, symint=symint)
|
357 |
+
for arg in getattr(func.arguments, arg_field)
|
358 |
+
)
|
359 |
+
self.positional_args = tuple(positional_args)
|
360 |
+
|
361 |
+
keyword_args: List[LazyArgument] = []
|
362 |
+
for arg_field in [
|
363 |
+
"pre_tensor_options_kwarg_only",
|
364 |
+
"tensor_options",
|
365 |
+
"post_tensor_options_kwarg_only",
|
366 |
+
"out",
|
367 |
+
]:
|
368 |
+
curr_args = getattr(func.arguments, arg_field)
|
369 |
+
if curr_args is not None:
|
370 |
+
if isinstance(curr_args, TensorOptionsArguments):
|
371 |
+
curr_args = curr_args.all()
|
372 |
+
for arg in curr_args:
|
373 |
+
if isGeneratorType(arg.type):
|
374 |
+
assert (
|
375 |
+
self.generator_arg is None
|
376 |
+
), "We expect there is only one generator arg"
|
377 |
+
self.generator_arg = NamedCType(
|
378 |
+
arg.name, arg.type # type:ignore[arg-type]
|
379 |
+
)
|
380 |
+
keyword_args.extend(
|
381 |
+
LazyArgument(arg, self.properties, symint=symint)
|
382 |
+
for arg in curr_args
|
383 |
+
)
|
384 |
+
self.keyword_args = tuple(keyword_args)
|
385 |
+
self.name = func.name
|
386 |
+
self.returns = func.returns
|
387 |
+
|
388 |
+
@property
|
389 |
+
def node_name(self) -> str:
|
390 |
+
"""
|
391 |
+
Return camel-case version of op in node.
|
392 |
+
|
393 |
+
Note: This function also appends any `overload_name` in the operation.
|
394 |
+
For example, if the op is `bitwise_and.Tensor`, the returned name
|
395 |
+
will be `BitwiseAndTensor`.
|
396 |
+
"""
|
397 |
+
op_name = f"{self.name.name}_{self.name.overload_name}".lower()
|
398 |
+
return "".join(word.capitalize() or "" for word in op_name.split("_"))
|
399 |
+
|
400 |
+
@property
|
401 |
+
def aten_name(self) -> str:
|
402 |
+
return str(self.name.name)
|
403 |
+
|
404 |
+
@property
|
405 |
+
def base_name(self) -> str:
|
406 |
+
return f"{self.name.name.base}"
|
407 |
+
|
408 |
+
def filtered_args(
|
409 |
+
self,
|
410 |
+
positional: bool = True,
|
411 |
+
keyword: bool = True,
|
412 |
+
values: bool = True,
|
413 |
+
scalars: bool = True,
|
414 |
+
generator: bool = True,
|
415 |
+
) -> List[LazyArgument]:
|
416 |
+
# This function maintains the sorted order of arguments but provides different filtered views.
|
417 |
+
# Some parts of the code care about kwargs vs args (TS lowerings),
|
418 |
+
# other parts care about whether they need to wrap the arg in a lazy value or leave it alone.
|
419 |
+
# Generators are special cased, as they are needed for fallback/shape-inference but not supported
|
420 |
+
# in TS lowerings and therefore also omitted from lazy IR.
|
421 |
+
args: List[LazyArgument] = []
|
422 |
+
if positional:
|
423 |
+
args.extend(self.positional_args)
|
424 |
+
if keyword:
|
425 |
+
args.extend(self.keyword_args)
|
426 |
+
|
427 |
+
if values and scalars and generator:
|
428 |
+
return args
|
429 |
+
elif values and scalars:
|
430 |
+
return [a for a in args if not a.is_generator]
|
431 |
+
elif values:
|
432 |
+
return [a for a in args if a.is_lazy_value]
|
433 |
+
elif scalars:
|
434 |
+
return [
|
435 |
+
a
|
436 |
+
for a in args
|
437 |
+
if not a.is_lazy_value and (generator or not a.is_generator)
|
438 |
+
]
|
439 |
+
|
440 |
+
return []
|
441 |
+
|
442 |
+
@property
|
443 |
+
def positional_values(self) -> List[LazyArgument]:
|
444 |
+
return self.filtered_args(
|
445 |
+
positional=True, keyword=False, values=True, scalars=False
|
446 |
+
)
|
447 |
+
|
448 |
+
@property
|
449 |
+
def positional_scalars(self) -> List[LazyArgument]:
|
450 |
+
return self.filtered_args(
|
451 |
+
positional=True, keyword=False, values=False, scalars=True
|
452 |
+
)
|
453 |
+
|
454 |
+
@property
|
455 |
+
def keyword_values(self) -> List[LazyArgument]:
|
456 |
+
return self.filtered_args(
|
457 |
+
positional=False, keyword=True, values=True, scalars=False
|
458 |
+
)
|
459 |
+
|
460 |
+
@property
|
461 |
+
def keyword_scalars(self) -> List[LazyArgument]:
|
462 |
+
return self.filtered_args(
|
463 |
+
positional=False, keyword=True, values=False, scalars=True
|
464 |
+
)
|
venv/lib/python3.10/site-packages/torchgen/api/meta.py
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from torchgen.model import NativeFunctionsGroup
|
2 |
+
|
3 |
+
# Follows dispatcher calling convention, but:
|
4 |
+
# - Mutable arguments not allowed. Meta functions are always
|
5 |
+
# written in functional form. Look at FunctionSchema.signature()
|
6 |
+
# - No tensor returns; instead we return a TensorMeta describing
|
7 |
+
# the tensor in question
|
8 |
+
|
9 |
+
|
10 |
+
def name(g: NativeFunctionsGroup) -> str:
|
11 |
+
# use the overload name from the functional version
|
12 |
+
return str(g.functional.func.name).replace(".", "_")
|
venv/lib/python3.10/site-packages/torchgen/api/native.py
ADDED
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import List, Optional, Sequence, Union
|
2 |
+
|
3 |
+
from torchgen import local
|
4 |
+
from torchgen.api import cpp
|
5 |
+
|
6 |
+
from torchgen.api.types import (
|
7 |
+
ArgName,
|
8 |
+
BaseCType,
|
9 |
+
Binding,
|
10 |
+
boolT,
|
11 |
+
ConstRefCType,
|
12 |
+
CType,
|
13 |
+
deviceT,
|
14 |
+
layoutT,
|
15 |
+
ListCType,
|
16 |
+
MutRefCType,
|
17 |
+
NamedCType,
|
18 |
+
OptionalCType,
|
19 |
+
scalarT,
|
20 |
+
scalarTypeT,
|
21 |
+
tensorT,
|
22 |
+
)
|
23 |
+
from torchgen.model import (
|
24 |
+
Argument,
|
25 |
+
FunctionSchema,
|
26 |
+
Return,
|
27 |
+
SelfArgument,
|
28 |
+
TensorOptionsArguments,
|
29 |
+
Type,
|
30 |
+
)
|
31 |
+
from torchgen.utils import assert_never
|
32 |
+
|
33 |
+
# This file describes the translation of JIT schema to the native functions API.
|
34 |
+
# This looks a lot like the C++ API (which makes historical sense, because the
|
35 |
+
# idea was you wrote native functions to implement functions in the C++ API),
|
36 |
+
# but over time we have evolved the C++ API without actually changing our
|
37 |
+
# native:: kernels. The intention is to make native API and dispatcher API
|
38 |
+
# line up as closely as possible, since this results in the least overhead
|
39 |
+
# (no translation is needed from dispatcher API to native API).
|
40 |
+
#
|
41 |
+
# NB: this is symint aware, you will get the non-SymInt variant for some
|
42 |
+
# dispatch entries and SymInt for others.
|
43 |
+
|
44 |
+
|
45 |
+
def name(func: FunctionSchema) -> str:
|
46 |
+
name = str(func.name.name)
|
47 |
+
# TODO: delete this!
|
48 |
+
if func.is_out_fn():
|
49 |
+
name += "_out"
|
50 |
+
if func.name.overload_name:
|
51 |
+
name += f"_{func.name.overload_name}"
|
52 |
+
return name
|
53 |
+
|
54 |
+
|
55 |
+
def argumenttype_type(
|
56 |
+
t: Type, *, mutable: bool, binds: ArgName, symint: bool
|
57 |
+
) -> NamedCType:
|
58 |
+
if str(t) == "Tensor?":
|
59 |
+
tensor_type: OptionalCType = OptionalCType(BaseCType(tensorT))
|
60 |
+
if mutable and not local.use_const_ref_for_mutable_tensors():
|
61 |
+
return NamedCType(binds, MutRefCType(tensor_type))
|
62 |
+
else:
|
63 |
+
return NamedCType(binds, ConstRefCType(tensor_type))
|
64 |
+
elif str(t) == "Tensor?[]":
|
65 |
+
return NamedCType(
|
66 |
+
binds, ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT))))
|
67 |
+
)
|
68 |
+
elif str(t) == "Scalar":
|
69 |
+
return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
|
70 |
+
elif str(t) == "Scalar?":
|
71 |
+
return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT))))
|
72 |
+
return cpp.argumenttype_type(t, mutable=mutable, binds=binds, symint=symint)
|
73 |
+
|
74 |
+
|
75 |
+
def returns_type(rs: Sequence[Return], *, symint: bool) -> CType:
|
76 |
+
return cpp.returns_type(rs, symint=symint)
|
77 |
+
|
78 |
+
|
79 |
+
def argument_type(a: Argument, *, binds: ArgName, symint: bool) -> NamedCType:
|
80 |
+
return argumenttype_type(a.type, mutable=a.is_write, binds=binds, symint=symint)
|
81 |
+
|
82 |
+
|
83 |
+
def argument(
|
84 |
+
a: Union[Argument, SelfArgument, TensorOptionsArguments],
|
85 |
+
*,
|
86 |
+
is_out: bool,
|
87 |
+
symint: bool,
|
88 |
+
) -> List[Binding]:
|
89 |
+
# Ideally, we NEVER default native functions. However, there are a number
|
90 |
+
# of functions that call native:: directly and rely on the defaulting
|
91 |
+
# existing. So for BC, we generate defaults for non-out variants (but not
|
92 |
+
# for out variants, where it is impossible to generate an appropriate
|
93 |
+
# default)
|
94 |
+
should_default = not is_out
|
95 |
+
if isinstance(a, Argument):
|
96 |
+
default: Optional[str] = None
|
97 |
+
if should_default and a.default is not None:
|
98 |
+
default = cpp.default_expr(a.default, a.type, symint=symint)
|
99 |
+
return [
|
100 |
+
Binding(
|
101 |
+
nctype=argument_type(a, binds=a.name, symint=symint),
|
102 |
+
name=a.name,
|
103 |
+
default=default,
|
104 |
+
argument=a,
|
105 |
+
)
|
106 |
+
]
|
107 |
+
elif isinstance(a, SelfArgument):
|
108 |
+
# Erase SelfArgument from the distinction
|
109 |
+
return argument(a.argument, is_out=is_out, symint=symint)
|
110 |
+
elif isinstance(a, TensorOptionsArguments):
|
111 |
+
default = None
|
112 |
+
if should_default:
|
113 |
+
default = "{}"
|
114 |
+
# TODO: Not sure why the arguments assigned here are for
|
115 |
+
# TensorOptionsArguments and not the constituent pieces. It seems
|
116 |
+
# to matter
|
117 |
+
return [
|
118 |
+
Binding(
|
119 |
+
nctype=NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))),
|
120 |
+
name="dtype",
|
121 |
+
default=default,
|
122 |
+
argument=a,
|
123 |
+
),
|
124 |
+
Binding(
|
125 |
+
nctype=NamedCType("layout", OptionalCType(BaseCType(layoutT))),
|
126 |
+
name="layout",
|
127 |
+
default=default,
|
128 |
+
argument=a,
|
129 |
+
),
|
130 |
+
Binding(
|
131 |
+
nctype=NamedCType("device", OptionalCType(BaseCType(deviceT))),
|
132 |
+
name="device",
|
133 |
+
default=default,
|
134 |
+
argument=a,
|
135 |
+
),
|
136 |
+
Binding(
|
137 |
+
nctype=NamedCType("pin_memory", OptionalCType(BaseCType(boolT))),
|
138 |
+
name="pin_memory",
|
139 |
+
default=default,
|
140 |
+
argument=a,
|
141 |
+
),
|
142 |
+
]
|
143 |
+
else:
|
144 |
+
assert_never(a)
|
145 |
+
|
146 |
+
|
147 |
+
def arguments(func: FunctionSchema, *, symint: bool) -> List[Binding]:
|
148 |
+
args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
|
149 |
+
args.extend(func.arguments.non_out)
|
150 |
+
args.extend(func.arguments.out)
|
151 |
+
return [
|
152 |
+
r for arg in args for r in argument(arg, symint=symint, is_out=func.is_out_fn())
|
153 |
+
]
|
venv/lib/python3.10/site-packages/torchgen/api/python.py
ADDED
@@ -0,0 +1,1509 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from dataclasses import dataclass
|
2 |
+
from typing import Dict, List, Optional, Sequence, Set, Tuple, Union
|
3 |
+
|
4 |
+
from torchgen.api import cpp
|
5 |
+
|
6 |
+
from torchgen.api.types import Binding, CppSignature, CppSignatureGroup
|
7 |
+
from torchgen.gen import pythonify_default
|
8 |
+
from torchgen.model import (
|
9 |
+
Argument,
|
10 |
+
BaseTy,
|
11 |
+
BaseType,
|
12 |
+
FunctionSchema,
|
13 |
+
ListType,
|
14 |
+
NativeFunction,
|
15 |
+
OptionalType,
|
16 |
+
Return,
|
17 |
+
Type,
|
18 |
+
Variant,
|
19 |
+
)
|
20 |
+
|
21 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
22 |
+
#
|
23 |
+
# Data Models
|
24 |
+
#
|
25 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
26 |
+
#
|
27 |
+
# [Notes] python binding codegen
|
28 |
+
#
|
29 |
+
# The Python binding codegen produces code that takes the input list of
|
30 |
+
# PyObjects, finds the matching ATen C++ function using PythonArgParser,
|
31 |
+
# converts the PyObjects into C++ types and calls the ATen C++ function:
|
32 |
+
#
|
33 |
+
# +--------+ parsing +------------------------+ binding +-----------------------+
|
34 |
+
# | PyObjs | ---------> | PythonArgParser Output | ---------> | Cpp Function Dispatch |
|
35 |
+
# +--------+ +------------------------+ +-----------------------+
|
36 |
+
#
|
37 |
+
# The following examples demonstrate the data models the Python binding
|
38 |
+
# codegen needs to deal with and the tasks it needs to accomplish. It
|
39 |
+
# helps understand the purpose of the new data types we introduced below.
|
40 |
+
#
|
41 |
+
# - Function Schema (source of truth)
|
42 |
+
#
|
43 |
+
# aten::empty.names(int[] size, *, Dimname[]? names,
|
44 |
+
# ScalarType? dtype=None, Layout? layout=None,
|
45 |
+
# Device? device=None, bool? pin_memory=None,
|
46 |
+
# MemoryFormat? memory_format=None) -> Tensor
|
47 |
+
#
|
48 |
+
# - Python Signature
|
49 |
+
#
|
50 |
+
# It's used to generate input schema string for PythonArgParser.
|
51 |
+
# Note: TensorOptions fields are reordered and the additional
|
52 |
+
# 'requires_grad' field is added:
|
53 |
+
#
|
54 |
+
# empty(IntArrayRef size, *, DimnameList? names,
|
55 |
+
# MemoryFormat? memory_format=None, ScalarType dtype=None,
|
56 |
+
# Layout layout=torch.strided, Device device=None,
|
57 |
+
# bool pin_memory=False, bool requires_grad=False)
|
58 |
+
#
|
59 |
+
# - C++ Signature
|
60 |
+
#
|
61 |
+
# It's used to generate C++ lambda formals & dispatch call.
|
62 |
+
# Note: the scattered TensorOptions fields are packed into 'options'.
|
63 |
+
#
|
64 |
+
# auto dispatch_empty =
|
65 |
+
# [](IntArrayRef size, c10::optional<DimnameList> names,
|
66 |
+
# const TensorOptions & options,
|
67 |
+
# c10::optional<MemoryFormat> memory_format) -> Tensor {
|
68 |
+
# pybind11::gil_scoped_release no_gil;
|
69 |
+
# return torch::empty(size, names, options, memory_format);
|
70 |
+
# };
|
71 |
+
#
|
72 |
+
# - Binding between Python Arguments and C++ Arguments
|
73 |
+
#
|
74 |
+
# Given a set of Python Arguments in scope, we need produce the
|
75 |
+
# binding expressions that translate the Python API into C++ API:
|
76 |
+
#
|
77 |
+
# Python Args Cpp Args Binding Exprs
|
78 |
+
# -----------------------------------------------------------------
|
79 |
+
# 0: size size '_r.intlist(0)'
|
80 |
+
# 1: names names 'names' [special init]
|
81 |
+
# 2: memory_format -------+
|
82 |
+
# 3: dtype -----+-|--> options 'options' [special packing]
|
83 |
+
# 4: layout / |
|
84 |
+
# 5: device / +--> memory_format '_r.memoryformatOptional(2)'
|
85 |
+
# 6: pin_memory /
|
86 |
+
# 7: requires_grad -+
|
87 |
+
#
|
88 |
+
# So the full dispatch expression would look like:
|
89 |
+
#
|
90 |
+
# dispatch_empty(_r.intlist(0), names, options,
|
91 |
+
# _r.memoryformatOptional(2))
|
92 |
+
#
|
93 |
+
# Where does 'names' come from? It involves special local init:
|
94 |
+
#
|
95 |
+
# auto __names = _r.toDimnameListOptional(1);
|
96 |
+
# c10::optional<DimnameList> names =
|
97 |
+
# __names ? c10::make_optional(DimnameList(__names.value()))
|
98 |
+
# : c10::nullopt;
|
99 |
+
#
|
100 |
+
# Where does 'options' come from? It involves special local init
|
101 |
+
# for TensorOptions. Note that Python side has the additional
|
102 |
+
# 'requires_grad' field:
|
103 |
+
#
|
104 |
+
# const auto options = TensorOptions()
|
105 |
+
# .dtype(_r.scalartype(3))
|
106 |
+
# .device(_r.device(5))
|
107 |
+
# .layout(_r.layoutOptional(4))
|
108 |
+
# .requires_grad(_r.toBool(7))
|
109 |
+
# .pinned_memory(_r.toBool(6));
|
110 |
+
#
|
111 |
+
# In some other cases one Python Argument can map to multiple C++
|
112 |
+
# Arguments. For example:
|
113 |
+
#
|
114 |
+
# aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False)
|
115 |
+
# -> (Tensor values, Tensor indices)
|
116 |
+
#
|
117 |
+
# Python Args Cpp Args Binding Exprs
|
118 |
+
# ---------------------------------------------------------------------
|
119 |
+
# +----> max 'out[0]'
|
120 |
+
# /-----> max_values 'out[1]
|
121 |
+
# 0: input / self '_r.tensor(0)'
|
122 |
+
# 1: dim / dim '_r.dimname(1)'
|
123 |
+
# 2: keepdim / keepdim '_r.toBool(2)'
|
124 |
+
# 3: out -----+ [local init] out '_r.tensorlist_n<2>(3)'
|
125 |
+
#
|
126 |
+
# As demonstrated above, the binding can involve reordering,
|
127 |
+
# packing, unpacking and special local inits.
|
128 |
+
#
|
129 |
+
#
|
130 |
+
# Let's look at a concrete example:
|
131 |
+
#
|
132 |
+
# static PythonArgParser parser({
|
133 |
+
# "abs(Tensor input, *, Tensor out=None)",
|
134 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
135 |
+
# ^
|
136 |
+
# +--- Python Schema, represented by PythonSignature and PythonArgument
|
137 |
+
#
|
138 |
+
# }, /*traceable=*/true);
|
139 |
+
#
|
140 |
+
# ParsedArgs<2> parsed_args;
|
141 |
+
# auto _r = parser.parse(nullptr, args, kwargs, parsed_args);
|
142 |
+
#
|
143 |
+
# ...
|
144 |
+
#
|
145 |
+
# if (_r.isNone(1)) {
|
146 |
+
# ~~~~~~~~~~~~ <--- Scattered PythonArgParser output (arg name = 'out')
|
147 |
+
# represented by PythonArgParserOutputExpr
|
148 |
+
#
|
149 |
+
# // aten::abs(Tensor self) -> Tensor
|
150 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
151 |
+
# ^
|
152 |
+
# +--- NativeFunction schema, base version
|
153 |
+
#
|
154 |
+
# auto dispatch_abs = [](const Tensor & self) -> Tensor {
|
155 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
156 |
+
# ^
|
157 |
+
# +--- dispatch_lambda_args / dispatch_lambda_return_str
|
158 |
+
# generated from NativeFunction / CppSignature
|
159 |
+
# (deprecated PythonSignature is special)
|
160 |
+
# arguments are represented by DispatchLambdaArgument
|
161 |
+
#
|
162 |
+
# pybind11::gil_scoped_release no_gil;
|
163 |
+
# return self.abs();
|
164 |
+
# ~~~~~~~~~~~ <--- cpp_dispatch_target / cpp_dispatch_exprs
|
165 |
+
# generated from NativeFunction / CppSignature
|
166 |
+
# };
|
167 |
+
# return wrap(dispatch_abs(_r.tensor(0)));
|
168 |
+
# ~~~~~~~~~~~~~
|
169 |
+
# ^
|
170 |
+
# +--- dispatch_lambda_exprs
|
171 |
+
# binding PythonArgParserOutputExpr (python args)
|
172 |
+
# and DispatchLambdaArgument (c++ args)
|
173 |
+
#
|
174 |
+
# } else {
|
175 |
+
# // aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
|
176 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
177 |
+
# ^
|
178 |
+
# +--- NativeFunction schema, out-variant
|
179 |
+
#
|
180 |
+
# auto dispatch_abs_out = [](Tensor out, const Tensor & self) -> Tensor {
|
181 |
+
# pybind11::gil_scoped_release no_gil;
|
182 |
+
# return at::abs_out(out, self);
|
183 |
+
# };
|
184 |
+
# return wrap(dispatch_abs_out(_r.tensor(1), _r.tensor(0)));
|
185 |
+
# }
|
186 |
+
#
|
187 |
+
#
|
188 |
+
# [Notes] python interface codegen
|
189 |
+
# The python dataclasses below are used used to generate both python binding code
|
190 |
+
# and pyi type hint signatures.
|
191 |
+
# In theory these two should look very similar, but there are number of differences
|
192 |
+
# in how pyi signatures vs. python_arg_parser signatures are generated.
|
193 |
+
# These differences have been encapsulated in signature_str() vs. signature_str_pyi()
|
194 |
+
# to display the full signatures, and argument_str() vs argument_str_pyi() to display arguments.
|
195 |
+
# For examples, only pyi signatures include return types.
|
196 |
+
|
197 |
+
|
198 |
+
@dataclass(frozen=True)
class PythonReturns:
    """Return declarations of one operator schema; consumed only by the
    .pyi codegen path (the python_arg_parser signature has no returns)."""

    # Schema Return objects, in declaration order.
    returns: Tuple[Return, ...]
@dataclass(frozen=True)
class PythonArgument:
    """One python-visible argument of an operator signature.

    Knows how to render itself both for the python_arg_parser signature
    string (argument_str) and for .pyi stubs (argument_str_pyi).
    """

    name: str
    type: Type
    default: Optional[str]

    # Used to generate the default init expr for some PythonArgParser outputs, e.g.:
    #
    #   _r.layoutWithDefault(3, layout_from_backend(self.options().backend())))
    #                           ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #                            ^
    #                            +--- default_init str
    default_init: Optional[str]

    # Compute argument formal for python argument parsing.
    # Needs to be consistent with torch/csrc/utils/python_arg_parser.h.
    def argument_str(self, *, method: bool = False, symint: bool = True) -> str:
        # The parser string uses the bare C++ type: drop 'const' / '&' qualifiers.
        parser_type = (
            argument_type_str(self.type, symint=symint)
            .replace("const ", "")
            .replace(" &", "")
        )

        # s/self/input/ outside method bindings
        # [old codegen] TODO: remove this? doesn't rename in codegen, it's just
        # for the parse string
        arg_name = self.name
        if not method and arg_name == "self" and parser_type in ("Tensor", "Number"):
            arg_name = "input"

        if self.default is None:
            return f"{parser_type} {arg_name}"
        # Map C++ null-ish defaults onto python 'None'.
        remapped = {
            "nullptr": "None",
            "c10::nullopt": "None",
            "{}": "None",
        }.get(self.default, self.default)
        return f"{parser_type} {arg_name}={remapped}"

    def argument_str_pyi(
        self, *, method: bool = False, deprecated: bool = False
    ) -> str:
        """Render this argument as a .pyi formal ('name: type = default')."""
        pyi_type = argument_type_str_pyi(self.type)

        # s/self/input/ outside method bindings
        # [old codegen] TODO: remove this? doesn't rename in codegen, it's just
        # for the parse string
        arg_name = self.name
        if arg_name == "self" and pyi_type == "Tensor" and not method and not deprecated:
            arg_name = "input"

        if arg_name == "from":  # from is a Python keyword...
            arg_name += "_"

        # pyi merges the _out and functional variants into the same signature, with an optional out arg
        if arg_name == "out" and pyi_type == "Tensor" and not deprecated:
            pyi_type = "Optional[" + pyi_type + "]"

        # pyi deprecated signatures don't get defaults for their out arg
        suppress_default = (
            deprecated
            and isinstance(self, PythonOutArgument)
            and self.default == "None"
        )
        if self.default is None or suppress_default:
            return f"{arg_name}: {pyi_type}"

        # C++ brace-initialized int-list defaults become python tuples.
        braced_int_list = (
            isinstance(self.type, ListType)
            and self.type.elem == BaseType(BaseTy.int)
            and self.default.startswith("{")
            and self.default.endswith("}")
        )
        if braced_int_list:
            rendered_default = "(" + self.default[1:-1] + ")"
        else:
            rendered_default = {
                "nullptr": "None",
                "c10::nullopt": "None",
                "{}": "None",
                "MemoryFormat::Contiguous": "contiguous_format",
                "QScheme::PER_TENSOR_AFFINE": "per_tensor_affine",
            }.get(self.default, self.default)
        return f"{arg_name}: {pyi_type} = {rendered_default}"
@dataclass(frozen=True)
class PythonOutArgument(PythonArgument):
    """The single 'out' argument of a python signature.

    In the python signature multiple output fields are packed into one 'out'
    argument. When binding to C++, it is first bound to a local 'out' variable:
        'auto out = _r.tensorlist_n<2>(2);',
    then bound to the scattered C++ output arguments as 'out[0]', 'out[1]', etc.
    TODO: maybe don't need keep scattered out fields for python signature?
    """

    # The original scattered output arguments this packs together.
    outputs: Tuple[PythonArgument, ...]

    @staticmethod
    def from_outputs(
        outputs: Tuple[PythonArgument, ...]
    ) -> Optional["PythonOutArgument"]:
        """Build the packed 'out' argument, or None when there are no outputs."""
        if not outputs:
            return None

        if len(outputs) == 1:
            only = outputs[0]
            return PythonOutArgument(
                name=only.name,
                type=only.type,
                default="None",
                default_init=None,
                outputs=outputs,
            )

        # Multiple outputs: all must be tensor-like so they can be packed
        # into a fixed-size TensorList.
        if any(not a.type.is_tensor_like() for a in outputs):
            raise RuntimeError(f"Unsupported output type: {outputs}")
        return PythonOutArgument(
            name="out",
            # TODO: shouldn't this be OptionalType[ListType[...]], since it defaults to None?
            type=ListType(BaseType(BaseTy.Tensor), len(outputs)),
            default="None",
            default_init=None,
            outputs=outputs,
        )
@dataclass(frozen=True)
class PythonSignature:
    """Python-visible signature of one operator.

    Derived from a FunctionSchema (see signature_from_schema); rendered either
    as a python_arg_parser signature string (signature_str) or as .pyi stub
    text (signature_str_pyi / signature_str_pyi_vararg).
    """

    # Base operator name, without inplace/outplace suffix.
    name: str

    # Positional arguments.
    # TODO: create a dedicated SelfArgument type for 'self'?
    input_args: Tuple[PythonArgument, ...]

    # Keyword arguments excluding the 'out' argument and scattered kwargs belonging
    # to TensorOptions (dtype, layout, device, pin_memory, requires_grad, etc).
    input_kwargs: Tuple[PythonArgument, ...]

    # Packed 'out' argument, or None for signatures without outputs.
    output_args: Optional[PythonOutArgument]

    # Return types, which are only used by pyi
    returns: PythonReturns

    # These are scattered kwargs arguments belonging to TensorOptions.
    # When binding to C++, they are packed into a TensorOptions object 'options'.
    # It's possible that the C++ signature doesn't take TensorOptions object (e.g.
    # for out variant), in which case they will be used as scattered fields without
    # being packed into 'options'.
    # TODO: maybe create a PythonTensorOptionsArgument?
    tensor_options_args: Tuple[PythonArgument, ...]

    # method or function signature?
    method: bool

    @property
    def deprecated(self) -> bool:
        # Overridden to True by PythonSignatureDeprecated.
        return False

    def arguments(
        self, *, skip_outputs: bool = False, skip_tensor_options: bool = False
    ) -> Tuple[Union[PythonArgument, PythonOutArgument], ...]:
        # Flattened argument list in parser order: positionals, kwargs,
        # then the packed 'out', then the scattered TensorOptions kwargs.
        result: List[Union[PythonArgument, PythonOutArgument]] = []
        result.extend(self.input_args)
        result.extend(self.input_kwargs)
        if self.output_args is not None and not skip_outputs:
            result.append(self.output_args)
        if not skip_tensor_options:
            result.extend(self.tensor_options_args)
        return tuple(result)

    def arguments_count(self) -> int:
        # Total number of arguments, including 'out' and TensorOptions kwargs.
        return len(self.arguments())

    def output_idx(self) -> int:
        # Index at which the packed 'out' argument appears in arguments().
        return len(self.input_args) + len(self.input_kwargs)

    # [old codegen] Compute the Python function signature for argument parsing,
    # as specified in torch/csrc/utils/python_arg_parser.h. WARNING:
    # this is NOT the same type signature as specified by PEP 484
    # as understood by mypy; our format was independently developed
    # and has some quirks to make it more suitable specifically
    # for error parsing.
    #
    # For a translation to mypy-valid type signatures, see
    # signature_str_pyi().
    def signature_str(self, *, skip_outputs: bool = False, symint: bool = True) -> str:
        args = self.arguments(skip_outputs=skip_outputs)
        schema_formals: List[str] = [
            a.argument_str(method=self.method, symint=symint) for a in args
        ]
        positional_argc = len(self.input_args)
        # '*' marks the start of keyword-only arguments.
        if len(schema_formals) > positional_argc:
            schema_formals.insert(positional_argc, "*")

        return f'{self.name}({", ".join(schema_formals)})'

    def signature_str_pyi(self, *, skip_outputs: bool = False) -> str:
        """Render this signature as a mypy-valid .pyi 'def' stub line."""
        args = self.arguments(skip_outputs=skip_outputs)
        schema_formals: List[str] = [
            a.argument_str_pyi(method=self.method) for a in args
        ]
        positional_argc = len(self.input_args)
        if len(schema_formals) > positional_argc:
            schema_formals.insert(positional_argc, "*")

        # only pyi signatures include returns
        returns_str = returns_str_pyi(self)
        # pyi also includes self (with no typing/defaults) for methods
        if self.method:
            schema_formals.insert(0, "self")
        return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...'

    def signature_str_pyi_vararg(self, *, skip_outputs: bool = False) -> Optional[str]:
        """Render a *args variant of the .pyi stub, or None when not applicable.

        Only generated when the sole positional argument is an int/SymInt list,
        so e.g. view(sizes) can also be called as view(*sizes).
        """
        # only pyi uses vararg signatures
        args = self.arguments(skip_outputs=skip_outputs)
        schema_formals: List[str] = [
            a.argument_str_pyi(method=self.method) for a in args
        ]
        # vararg only applies to pyi signatures. vararg variants are not generated for all signatures
        num_args = self.arguments_count()
        num_positionalargs = len(self.input_args)

        have_vararg_version = False
        if num_args > 0:
            vararg_type = args[0].type
            if (
                isinstance(vararg_type, ListType)
                and str(vararg_type.elem) in ["int", "SymInt"]
                and num_positionalargs == 1
            ):
                have_vararg_version = True

        if not have_vararg_version:
            return None
        # Below are the major changes in vararg vs. regular pyi signatures
        # vararg signatures also omit the asterix
        # NOTE(review): the element type is hard-coded to _int even when the
        # list element is SymInt — confirm this is intended.
        schema_formals[0] = "*" + args[0].name + ": _int"

        returns_str = returns_str_pyi(self)
        # pyi also includes self (with no typing/defaults) for methods
        if self.method:
            schema_formals.insert(0, "self")
        return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...'
# The deprecated python signature involves some special logic, so create a
# dedicated data model to store these extra properties.
@dataclass(frozen=True)
class PythonSignatureDeprecated(PythonSignature):
    # Schema for the deprecated function
    deprecated_schema: FunctionSchema

    # The deprecated signature might miss some arguments that the corresponding
    # C++ signature expects. We need store the constant default values to pass in.
    # For example:
    #   [deprecate signature]: addmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2)
    #   [func schema]: aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
    #   [func call]: self.addmm(mat1, mat2, beta, 1)
    # We store ['self', 'mat1', 'mat2', 'beta', '1'] in this case.
    deprecated_args_exprs: Tuple[str, ...]

    @property
    def deprecated(self) -> bool:
        return True

    def signature_str(self, *, skip_outputs: bool = False, symint: bool = True) -> str:
        # Same parser string as the base class, tagged so the parser knows
        # to emit a deprecation warning.
        base_str = PythonSignature.signature_str(
            self, skip_outputs=skip_outputs, symint=symint
        )
        return base_str + "|deprecated"

    def signature_str_pyi(self, *, skip_outputs: bool = False) -> str:
        # Like the base class, but arguments are rendered in deprecated mode
        # and 'self' is never prepended.
        formals = [
            a.argument_str_pyi(method=self.method, deprecated=True)
            for a in self.arguments(skip_outputs=skip_outputs)
        ]
        n_positional = len(self.input_args)
        if len(formals) > n_positional:
            formals.insert(n_positional, "*")

        return f'def {self.name}({", ".join(formals)}) -> {returns_str_pyi(self)}: ...'

    def signature_str_pyi_vararg(self, *, skip_outputs: bool = False) -> Optional[str]:
        # the codegen doesn't include vararg variants for deprecated signatures
        return None
# This struct is used to hold the PythonSignature and its corresponding
# NativeFunction BEFORE grouping base and out-variant functions.
# Why not store NativeFunction in PythonSignature or construct PythonSignature
# from NativeFunction? Because they are not 1-1 mapped.
# One native function could have both deprecated and non-deprecated python
# signatures - NativeFunction doesn't contain information to construct the
# deprecated python signature.
# One python signature is used to handle both the base and the out-variant
# function - see 'PythonSignatureGroup'.
@dataclass(frozen=True)
class PythonSignatureNativeFunctionPair:
    # The python-facing signature derived for 'function'.
    signature: PythonSignature
    # The native function the signature was derived from.
    function: NativeFunction
# We merge pairs of functions with signatures that are equivalent mod
# output arguments, and use a single entry in the python_arg_parser sig
# list for both (output arguments become optional).
@dataclass(frozen=True)
class PythonSignatureGroup:
    # The signature used for Python argument parsing. The outplace signature
    # is preferred if exists, because it can be used to parse inputs for both
    # the out-place variant and the base version (with output omitted).
    signature: PythonSignature

    # The regular ATen declaration (e.g. conv2d)
    base: NativeFunction

    # The out variant (e.g. conv2d_out)
    outplace: Optional[NativeFunction]

    @classmethod
    def from_pairs(
        cls,
        functional: PythonSignatureNativeFunctionPair,
        out: Optional[PythonSignatureNativeFunctionPair],
    ) -> "PythonSignatureGroup":
        """Merge a functional variant with its (optional) out variant."""
        if out is None:
            return cls(
                signature=functional.signature,
                base=functional.function,
                outplace=None,
            )

        # prefer the signature with optional out=... arguments because it's the
        # superset that can be used to parse input for both base and outplace.
        merged_fields = dict(out.signature.__dict__)

        # Out overloads in C++ don't have TensorOptions arguments,
        # so take these from the functional variant
        merged_fields["tensor_options_args"] = functional.signature.tensor_options_args

        # Rebuild with the same concrete signature class as 'out.signature'.
        return cls(
            signature=type(out.signature)(**merged_fields),
            base=functional.function,
            outplace=out.function,
        )
# C++ function dispatch is wrapped in a lambda function. The lambda function
# has almost the same signature as the C++ function, only with some small
# variants - see details below.
# This data model is used to represent arguments of the lambda function
# signature.
@dataclass(frozen=True)
class DispatchLambdaArgument:
    # Formal parameter name in the generated lambda.
    name: str
    # C++ type of the formal, rendered as a string.
    type_str: str
    # Whether this formal corresponds to an output ('out') argument.
    is_out_arg: bool
# To pass PyObjects arguments to C++ function (via the lambda wrapper),
# we need first convert PyObjects into simple C++ objects. This work
# is done by PythonArgParser.
# This data model is used to represent the output of PythonArgParser.
# It has 1-1 mapping with PythonArgument in PythonSignature.
@dataclass(frozen=True)
class PythonArgParserOutputExpr:
    # argument name
    name: str

    # RHS expression to reference PythonArgParser output.
    expr: str

    # Parser-slot index; kept because in some special cases we need to create
    # a different expr, e.g.: '_r.isNone(1)' instead of '_r.tensor(1)'.
    index: int

    # The python argument it maps to.
    argument: PythonArgument

    @property
    def is_none_expr(self) -> str:
        # C++ expression testing whether this parsed argument was None.
        return f"_r.isNone({self.index})"
# To pass PythonArgParser output to the lambda wrapper, we need bind
# PythonArgParserOutputExpr to DispatchLambdaArgument.
# They are not always 1-1 mapped, e.g. scattered TensorOptions fields
# need be packed into a TensorOptions object, which is the argument
# that the lambda function wrapper takes.
@dataclass(frozen=True)
class DispatchLambdaArgumentExprs:
    # The exprs that provide the binding for lambda arguments, e.g.:
    #
    #   'self' -> '_r.tensor(0)'
    #   'min' -> 'out[0]' / 'min_indices' -> 'out[1]'
    #   'options' -> 'options'
    #
    # It has 1-1 mapping with DispatchLambdaArgument.
    exprs: Sequence[str]

    # Special local inits, which might introduce new variables that
    # the 'exprs' above reference, e.g.:
    #
    #   'auto out = _r.tensorlist_n<2>(2);'
    #
    inits: Sequence[str]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
619 |
+
#
|
620 |
+
# Helper Functions
|
621 |
+
#
|
622 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
623 |
+
|
624 |
+
|
625 |
+
def _cpp_signature(f: NativeFunction, *, method: bool = False) -> CppSignature:
    # The default C++ signature for a native function (as a method when
    # 'method' is True), taken from its CppSignatureGroup.
    return CppSignatureGroup.from_native_function(f, method=method).signature
def has_tensor_options(f: NativeFunction) -> bool:
    # True iff the schema bundles a TensorOptions argument group
    # (dtype/layout/device/pin_memory).
    return f.func.arguments.tensor_options is not None
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
634 |
+
#
|
635 |
+
# Python Signature
|
636 |
+
#
|
637 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
638 |
+
|
639 |
+
|
640 |
+
# 'simple_type' was introduced by the old codegen, which is slightly
# different from the python schema type, e.g.: doesn't have '?' suffix
# for optional Tensor/TensorList; doesn't have '[size]' suffix for list type.
def argument_type_str(
    t: Type, *, simple_type: bool = False, symint: bool = True
) -> str:
    """Render a schema Type as the type string used in python_arg_parser
    signatures (must stay consistent with torch/csrc/utils/python_arg_parser.h).

    simple_type: emit the old-codegen "simple" spelling (no list sizes).
    symint: when False, SymInt lists are spelled as IntArrayRef.
    Raises RuntimeError for unrecognized types.
    """
    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            return "Tensor"
        elif t.name == BaseTy.int:
            return "int64_t"
        elif t.name == BaseTy.float:
            return "double"
        elif t.name == BaseTy.str:
            return "c10::string_view"
        elif t.name in [
            BaseTy.bool,
            BaseTy.QScheme,
            BaseTy.Scalar,
            BaseTy.ScalarType,
            BaseTy.Generator,
            BaseTy.Storage,
            BaseTy.Layout,
            BaseTy.Device,
            BaseTy.DeviceIndex,
            BaseTy.MemoryFormat,
            BaseTy.Dimname,
            BaseTy.Stream,
            BaseTy.ConstQuantizerPtr,
            BaseTy.SymInt,
        ]:
            # These python schema type names line up with their function schema names
            return t.name.name

    elif isinstance(t, OptionalType):
        if str(t.elem) == "Tensor":
            # Is it desired to keep '?' for simple_type with new style dispatcher?
            return "Tensor?"
        elem = argument_type_str(t.elem, simple_type=simple_type, symint=symint)
        return f"{elem}?"
    elif isinstance(t, ListType):
        # simple_type drops the '[size]' suffix from list types.
        size = t.size if not simple_type else None
        if str(t.elem) == "bool":
            # bool lists must have a fixed size (std::array).
            assert t.size is not None
            return f"::std::array<bool,{t.size}>"
        elif str(t.elem) == "int":
            return f"IntArrayRef[{size}]" if size is not None else "IntArrayRef"
        elif str(t.elem) == "SymInt":
            if symint:
                return (
                    f"SymIntArrayRef[{size}]" if size is not None else "SymIntArrayRef"
                )
            else:
                return f"IntArrayRef[{size}]" if size is not None else "IntArrayRef"
        elif str(t.elem) == "Tensor":
            return f"TensorList[{size}]" if size is not None else "TensorList"
        elif str(t.elem) == "Scalar":
            return f"ScalarList[{size}]" if size is not None else "ScalarList"
        elif str(t.elem) == "Tensor?":
            if simple_type:
                return "c10::List<c10::optional<Tensor>>"
            else:
                return "const c10::List<c10::optional<Tensor>> &"
        elif str(t.elem) == "Dimname":
            return f"DimnameList[{size}]" if size is not None else "DimnameList"
        # Fallback for other list element types.
        elem = argument_type_str(t.elem, simple_type=simple_type, symint=symint)
        return f"ArrayRef<{elem}>"

    raise RuntimeError(f"unrecognized type {repr(t)}")
def argument_type_size(t: Type) -> Optional[int]:
    """Return the declared fixed size of a list-like type, or None.

    None is also returned for bool lists (rendered as std::array, whose
    size is part of the type itself).
    """
    list_like = t.is_list_like()
    if list_like is None or str(list_like.elem) == "bool":
        return None
    return list_like.size
def argument(a: Argument) -> PythonArgument:
    """Convert a schema Argument into its PythonArgument counterpart."""
    # TODO: directly translate a.default to python default
    default: Optional[str] = None
    if a.default is not None:
        # Route the schema default through the C++ default expr, then
        # pythonify it (e.g. null-ish values become 'None').
        default = str(
            pythonify_default(cpp.default_expr(a.default, a.type, symint=False))
        )
    return PythonArgument(
        name=a.name,
        type=a.type,
        default=default,
        default_init=None,
    )
# Generates a PythonSignature that can be used for either .pyi or PythonArgParser codegen
def signature(
    f: NativeFunction, *, method: bool = False, pyi: bool = False
) -> PythonSignature:
    # Thin wrapper: delegate to signature_from_schema with the function's
    # schema and category override.
    return signature_from_schema(
        f.func, category_override=f.category_override, method=method, pyi=pyi
    )
def signature_from_schema(
    func: FunctionSchema,
    *,
    category_override: Optional[str],
    method: bool = False,
    pyi: bool = False,
) -> PythonSignature:
    """Build the PythonSignature for a function schema.

    method: build a method signature (drops the 'self' argument).
    pyi: accepted for interface parity; not consulted in this body.
    Raises ValueError if the schema declares a 'requires_grad' argument.
    """
    # Collect schema arguments in python-signature order, skipping the
    # TensorOptions group (reintroduced as scattered python kwargs below).
    args: List[Argument] = []
    args.extend(func.arguments.pre_self_positional)
    # Skip SelfArgument if this is method.
    if not method and func.arguments.self_arg is not None:
        args.append(func.arguments.self_arg.argument)
    args.extend(func.arguments.post_self_positional)
    args.extend(func.arguments.pre_tensor_options_kwarg_only)
    # Skip TensorOptionsArguments. Python side TensorOptions
    # arguments are created based on different rules - see below.
    args.extend(func.arguments.post_tensor_options_kwarg_only)
    args.extend(func.arguments.out)

    # Name sets used to bucket 'args' into positional / kwarg-only / out.
    input_arg_set = {a.name for a in func.arguments.flat_positional}
    kwarg_only_set = {a.name for a in func.arguments.flat_kwarg_only}
    out_arg_set = {a.name for a in func.arguments.out}

    input_args = tuple(map(argument, filter(lambda a: a.name in input_arg_set, args)))
    input_kwargs = tuple(
        map(argument, filter(lambda a: a.name in kwarg_only_set, args))
    )
    outputs = tuple(map(argument, filter(lambda a: a.name in out_arg_set, args)))

    # Reintroduce the scattered fields of TensorOptions for Python.
    # Compared to the cpp counterpart, the python arguments have new property
    # (default_init) and a new argument 'requires_grad', which require some
    # special handlings.
    # [old codegen] TODO: because these aren't guaranteed to be 100% faithful
    # to the original versions in the yaml, this recreation is a potential
    # source of drift between eager and JIT. Pull this logic out to a shared place.

    has_tensor_input_arg = any(
        a.type.is_tensor_like() for a in func.arguments.flat_non_out
    )
    if any(a.name == "requires_grad" for a in func.schema_order_arguments()):
        raise ValueError(
            "argument named requires_grad is reserved, should not explicitly add it in the schema"
        )

    # [old codegen] this probably won't work if one of the returns is not a tensor,
    # but it will produce a compile-time error that is obvious.
    has_tensor_return = any(r.type.is_tensor_like() for r in func.returns)

    name: str = cpp.name(func)
    # Factory-like ops (tensor out, no tensor in) get TensorOptions kwargs.
    is_factory_function = category_override == "factory" or (
        has_tensor_return and not has_tensor_input_arg
    )
    is_like_or_new_function = (
        category_override in ("new", "like")
        or name.startswith("new_")
        or name.endswith("_like")
    )
    is_dummy_function = category_override == "dummy"

    tensor_options_args: List[PythonArgument] = []
    if (is_factory_function or is_like_or_new_function) and not is_dummy_function:

        def topt_default_init(name: str) -> Optional[str]:
            # C++ default-init expression for one TensorOptions field, or
            # None when the schema has no TensorOptions / no real default.
            topt_args = func.arguments.tensor_options
            if topt_args is None:
                return None
            a = getattr(topt_args, name)
            if a.default is None or a.default == "None":
                return None
            return cpp.default_expr(a.default, a.type, symint=False)

        # 'like'/'new' functions inherit dtype/layout/device from their input,
        # hence default_init is suppressed for them.
        tensor_options_args.append(
            PythonArgument(
                name="dtype",
                type=OptionalType(BaseType(BaseTy.ScalarType)),
                default="None",
                default_init=(
                    None if is_like_or_new_function else topt_default_init("dtype")
                ),
            )
        )
        tensor_options_args.append(
            PythonArgument(
                name="layout",
                type=OptionalType(BaseType(BaseTy.Layout)),
                default="None",
                default_init=(
                    None if is_like_or_new_function else topt_default_init("layout")
                ),
            )
        )
        tensor_options_args.append(
            PythonArgument(
                name="device",
                type=OptionalType(BaseType(BaseTy.Device)),
                default="None",
                default_init=(
                    None
                    if is_like_or_new_function
                    else (
                        topt_default_init("device")
                        or "torch::tensors::get_default_device()"
                    )
                ),
            )
        )
        tensor_options_args.append(
            PythonArgument(
                name="pin_memory",
                type=OptionalType(BaseType(BaseTy.bool)),
                default="False",
                default_init=None,
            )
        )
        tensor_options_args.append(
            PythonArgument(
                name="requires_grad",
                type=OptionalType(BaseType(BaseTy.bool)),
                default="False",
                default_init=None,
            )
        )

    returns = PythonReturns(returns=func.returns)

    return PythonSignature(
        name=str(func.name.name),
        input_args=input_args,
        input_kwargs=input_kwargs,
        output_args=PythonOutArgument.from_outputs(outputs),
        tensor_options_args=tuple(tensor_options_args),
        returns=returns,
        method=method,
    )
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
880 |
+
#
|
881 |
+
# Python Interface
|
882 |
+
#
|
883 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
884 |
+
|
885 |
+
|
886 |
+
def structseq_fieldnames(returns: Tuple[Return, ...]) -> List[str]:
    """Field names for a multi-return op's structseq return type.

    Returns [] when there is at most one return or all returns are unnamed.
    Raises ValueError when returns are only partially named.
    """
    if len(returns) <= 1 or all(r.name is None for r in returns):
        return []

    if any(r.name is None for r in returns):
        # When building on Windows, `PyStructSequence_UnnamedField` could not be
        # resolved by the linker for some reason, which cause error in building:
        #
        #   python_nn_functions.cpp.obj : error LNK2001: unresolved external symbol
        #   PyStructSequence_UnnamedField
        #
        # Thus, at this point in time, we do not support unnamed
        # fields in structseq; you must either name all fields,
        # or none of them.
        raise ValueError("Unnamed field is not supported by codegen")

    return [str(r.name) for r in returns]
def argument_type_str_pyi(t: Type) -> str:
    """Render a schema Type as the (permissive, Union-accepting) .pyi type
    string used for argument annotations; see return_type_str_pyi for the
    concrete return-side counterpart.

    Raises RuntimeError for unrecognized types.
    """
    add_optional = False
    if isinstance(t, OptionalType):
        # Unwrap; re-wrap in Optional[...] at the end.
        t = t.elem
        add_optional = True

    if isinstance(t, BaseType):
        if t.name in [BaseTy.int, BaseTy.DeviceIndex]:
            ret = "_int"
        # NOTE: deliberately 'if', not 'elif' — this starts a fresh chain; the
        # "_int" assigned above survives because none of the branches below
        # match int/DeviceIndex.
        if t.name == BaseTy.SymInt:
            ret = "Union[_int, SymInt]"
        elif t.name == BaseTy.float:
            ret = "_float"
        elif t.name == BaseTy.str:
            ret = "str"
        elif t.name == BaseTy.Scalar:
            ret = "Union[Number, _complex]"
        elif t.name == BaseTy.ScalarType:
            ret = "_dtype"
        elif t.name == BaseTy.bool:
            ret = "_bool"
        elif t.name == BaseTy.QScheme:
            ret = "_qscheme"
        elif t.name == BaseTy.Layout:
            ret = "_layout"
        elif t.name == BaseTy.Device:
            ret = "Optional[DeviceLikeType]"
        elif t.name == BaseTy.MemoryFormat:
            ret = "memory_format"
        elif t.name == BaseTy.Dimname:
            ret = "Union[str, ellipsis, None]"
        elif t.name == BaseTy.Storage:
            ret = "Union[Storage, UntypedStorage]"
        elif t.name in [BaseTy.Tensor, BaseTy.Generator, BaseTy.Stream]:
            # These python schema type names line up with their function schema names
            ret = t.name.name

    elif isinstance(t, ListType):
        if str(t.elem) == "int":
            ret = "Union[_int, _size]" if t.size is not None else "_size"
        elif t.is_tensor_like():
            # TODO: this doesn't seem right...
            # Tensor?[] currently translates to Optional[Union[Tuple[Tensor, ...], List[Tensor]]]
            # It should probably translate to   Union[Tuple[Optional[Tensor], ...], List[Optional[Tensor]]]
            if isinstance(t.elem, OptionalType):
                add_optional = True
            ret = (
                "Union[Tensor, Tuple[Tensor, ...], List[Tensor]]"
                if t.size is not None
                else "Union[Tuple[Tensor, ...], List[Tensor]]"
            )
        elif str(t.elem) == "float":
            ret = "Sequence[_float]"
        elif str(t.elem) == "SymInt" and t.size is not None:
            elem = argument_type_str_pyi(t.elem)
            ret = f"Union[{elem}, Sequence[{elem}]]"
        else:
            elem = argument_type_str_pyi(t.elem)
            ret = f"Sequence[{elem}]"

    else:
        raise RuntimeError(f"unrecognized type {repr(t)}")

    if add_optional:
        ret = "Optional[" + ret + "]"

    return ret
def return_type_str_pyi(t: Type) -> str:
    """Render a schema Type as the .pyi type string for a RETURN position.

    Where arguments are open to accepting Union, return types should return
    concrete types.
    """
    if isinstance(t, OptionalType):
        inner = return_type_str_pyi(t.elem)
        return f"Optional[{inner}]"

    if isinstance(t, BaseType):
        if t.name == BaseTy.Device:
            return "_device"
        elif t.name == BaseTy.Dimname:
            # FIX: was `ret = "Optional[str]"` — a dead store that was never
            # returned, so Dimname silently fell through to the permissive
            # argument-style type. Return the intended concrete type directly.
            return "Optional[str]"
        else:
            return argument_type_str_pyi(t)

    if isinstance(t, ListType):
        # Lists are concretized to variadic tuples on the return side.
        inner = return_type_str_pyi(t.elem)
        return f"Tuple[{inner}, ...]"

    return argument_type_str_pyi(t)
def returns_structseq_pyi(signature: PythonSignature) -> Optional[Tuple[str, str]]:
    """Build the .pyi class definition for a structseq return type.

    Returns a (name, class-definition-source) pair when the signature has
    named outputs, otherwise None.
    """
    python_returns = [return_type_str_pyi(r.type) for r in signature.returns.returns]
    structseq_name = signature.name
    field_names = structseq_fieldnames(signature.returns.returns)
    if field_names:
        # These types are structseq objects which act like named NamedTuples, but
        # the constructor acts like the constructor of tuple. Using typing.NamedTuple
        # does not allow us to override __init__.
        # (FIX: removed unused local `field_names_str`.)
        seq_type = f"Tuple[{', '.join(python_returns)}]"
        structseq_def_lines = [
            f"class {structseq_name}({seq_type}):",
        ]
        for name, typ in zip(field_names, python_returns):
            structseq_def_lines.extend(
                [
                    "    @property",
                    f"    def {name}(self) -> {typ}: ...",
                ]
            )
        structseq_def_lines.extend(
            [
                f"    def __new__(cls, sequence: {seq_type}): ...",
                f"    n_fields: _int = {len(field_names)}",
                # FIX: was misspelled "n_sequeunce_fields"; the real CPython
                # structseq attribute is "n_sequence_fields".
                f"    n_sequence_fields: _int = {len(field_names)}",
                "    n_unnamed_fields: _int = 0",
                "    def __init_subclass__(cls) -> NoReturn: ...  # prohibit subclassing",
                "",  # add an extra newline
            ]
        )
        structseq_def = "\n".join(structseq_def_lines)
        # Example:
        # structseq_def = (
        #   "class max(Tuple[Tensor, Tensor]):\n"
        #   "    @property\n"
        #   "    def values(self) -> Tensor: ...\n"
        #   "    @property\n"
        #   "    def indices(self) -> Tensor: ...\n"
        #   "    def __new__(cls, sequence: Tuple[Tensor, Tensor]): ...\n"
        #   "    n_fields: _int = 2",
        #   "    n_sequence_fields: _int = 2",
        #   "    n_unnamed_fields: _int = 0",
        #   "    def __init_subclass__(cls) -> NoReturn: ...  # prohibit subclassing",
        # )
        return structseq_name, structseq_def
    return None
|
1043 |
+
|
1044 |
+
|
1045 |
+
def returns_str_pyi(signature: PythonSignature) -> str:
    """Render the return annotation string for a generated .pyi signature."""
    if structseq_fieldnames(signature.returns.returns):
        # Named outputs are exposed as a structseq under torch.return_types.
        return f"torch.return_types.{signature.name}"

    rets = [return_type_str_pyi(r.type) for r in signature.returns.returns]
    if not rets:
        return "None"
    if len(rets) == 1:
        return rets[0]
    return "Tuple[" + ", ".join(rets) + "]"
|
1056 |
+
|
1057 |
+
|
1058 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
1059 |
+
#
|
1060 |
+
# C++ Function Dispatch
|
1061 |
+
#
|
1062 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
1063 |
+
# This section provides APIs to generate the code that does C++ function
|
1064 |
+
# dispatch. The C++ function call is wrapped by a lambda function.
|
1065 |
+
# For example:
|
1066 |
+
#
|
1067 |
+
# // aten::selu_(Tensor(a!) self) -> Tensor(a!)
|
1068 |
+
# auto dispatch_selu_ = [](Tensor self) -> Tensor {
|
1069 |
+
# pybind11::gil_scoped_release no_gil;
|
1070 |
+
# return at::selu_(self);
|
1071 |
+
# };
|
1072 |
+
#
|
1073 |
+
# The lambda function's signature follows the C++ signature in common
|
1074 |
+
# cases, e.g.:
|
1075 |
+
#
|
1076 |
+
# // aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
|
1077 |
+
# [](const Tensor & self, const Tensor & other, Scalar alpha) -> Tensor
|
1078 |
+
#
|
1079 |
+
# For out variant the 'out' argument's type is changed from 'Tensor &'
|
1080 |
+
# to 'Tensor'. It's because when calling the lambda it passes in the
|
1081 |
+
# PythonArgParser output '_r.tensor(3)', which is stack allocated object
|
1082 |
+
# and needs to pass by value. Also see comments in 'dispatch_lambda_return_str()'.
|
1083 |
+
#
|
1084 |
+
# // aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
|
1085 |
+
# [](Tensor out, const Tensor & self, const Tensor & other, Scalar alpha) -> Tensor
|
1086 |
+
#
|
1087 |
+
# For multi-output case it can keep using reference type because the
|
1088 |
+
# PythonArgParser output has been unpacked to local variables, e.g.:
|
1089 |
+
#
|
1090 |
+
# // aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *,
|
1091 |
+
# // Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
|
1092 |
+
# [](Tensor & max, Tensor & max_values, const Tensor & self, Dimname dim, bool keepdim) -> std::tuple<Tensor,Tensor>
|
1093 |
+
#
|
1094 |
+
# For deprecated python signature, it should follow deprecated python arg order.
|
1095 |
+
# TODO: This is to keep same byte-for-byte result as the old codegen - maybe unnecessary?
|
1096 |
+
|
1097 |
+
|
1098 |
+
def dispatch_lambda_args(
    ps: PythonSignature, f: NativeFunction, symint: bool = True
) -> Tuple[DispatchLambdaArgument, ...]:
    """Compute the argument list of the C++ dispatch lambda for ``f``.

    Deprecated python signatures use their own schema so the lambda follows
    the deprecated argument order.
    """
    schema = (
        ps.deprecated_schema if isinstance(ps, PythonSignatureDeprecated) else f.func
    )

    # Start with cpp arguments - dispatch lambda signature always include 'self'
    bindings = cpp.arguments(
        arguments=schema.arguments,
        faithful=False,
        symint=symint,
        method=False,
        cpp_no_default_args=f.cpp_no_default_args,
    )
    out_names: Set[str] = {a.name for a in schema.arguments.out}

    lambda_args: List[DispatchLambdaArgument] = []
    for binding in bindings:
        type_str = binding.type
        is_out_arg = binding.name in out_names
        if ps.method and binding.name == "self":
            # For method's 'self', we can use 'const Tensor &' and simply
            # ignore mutability!
            type_str = "const at::Tensor &"
        elif len(out_names) <= 1 or not is_out_arg:
            # Prevent dangling refs to temps (unless it's unpacked scattered
            # output): single/non-out tensor refs become by-value, because the
            # caller passes stack-allocated PythonArgParser results.
            # See comments in 'dispatch_lambda_return_str()'.
            # TODO: avoid this special handling?
            if type_str == "at::Tensor &":
                type_str = "at::Tensor"
        lambda_args.append(
            DispatchLambdaArgument(
                name=binding.name,
                type_str=type_str,
                is_out_arg=is_out_arg,
            )
        )

    return tuple(lambda_args)
|
1140 |
+
|
1141 |
+
|
1142 |
+
# [old codegen] XXX: if you got here because of an assertion failure, it doesn't mean
# it's enough to just extend the list here. Before you do this, make sure
# to add an appropriate wrap() overload in torch/csrc/autograd/utils/wrap_outputs.h.
# Whitelist of C++ return types the dispatch lambdas may produce; checked by
# dispatch_lambda_return_str() below.
SUPPORTED_RETURN_TYPES = {
    "at::Tensor",
    "::std::tuple<at::Tensor,at::Tensor>",
    "::std::tuple<at::Tensor,at::Tensor,at::Tensor>",
    "::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor>",
    "::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor>",
    "::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor>",
    "::std::tuple<at::Tensor,at::Tensor,at::Tensor,int64_t>",
    "::std::tuple<at::Tensor,at::Tensor,double,int64_t>",
    "::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t>",
    "::std::tuple<at::Tensor,at::Tensor,double,at::Tensor,int64_t>",
    "::std::tuple<double,int64_t>",
    "::std::tuple<at::Tensor,::std::vector<at::Tensor>>",
    "::std::vector<at::Tensor>",
    # Needed for flash attention forw/backward
    "::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor>",
    "at::Scalar",
    "bool",
    "int64_t",
    "void*",
    "void",
    "at::QScheme",
    "double",
    "at::IntArrayRef",
    "at::ScalarType",
    "at::Stream",
}
|
1172 |
+
|
1173 |
+
|
1174 |
+
def dispatch_lambda_return_str(f: NativeFunction) -> str:
    """Return the C++ return type string of the dispatch lambda for ``f``.

    [old codegen] Remove type annotation (e.g. 'Tensor' rather than 'Tensor &')
    because the dispatch lambdas take mutable arguments *by value*, not
    by reference. If you then return a reference to such an argument, you
    will now have a pointer to a dangling stack entry. Not good.

    You want:

        auto dispatch_selu_ = [](Tensor self) -> Tensor { ...; return at::selu_(self); };
                                                  ^^^^^^

    *not*

        auto dispatch_selu_ = [](Tensor self) -> Tensor& { ...; return at::selu_(self); };
                                                 ^^^^^^^

    (NB: We can't make dispatch_selu_ take Tensor&, because the enclosing
    codegen looks like dispatch_selu_(_r.tensor(0)), and you can't take a
    mutable reference to temporary. Maybe we could assign it to a
    variable itself.)

    Raises RuntimeError for return types without a wrap() overload.
    """
    # Strip annotations from every return before computing the C++ type.
    stripped_returns = tuple(Return(r.name, r.type, None) for r in f.func.returns)
    cpp_return = cpp.returns_type(stripped_returns, symint=True).cpp_type()
    if cpp_return not in SUPPORTED_RETURN_TYPES:
        raise RuntimeError(f"{f.func.name} returns unsupported type {cpp_return}")
    return cpp_return
|
1201 |
+
|
1202 |
+
|
1203 |
+
def cpp_dispatch_target(f: NativeFunction) -> str:
    """Return the C++ callee expression ('self.f', 'at::f' or 'torch::f')."""
    symint = f.func.has_symint()
    name = cpp.name(f.func, symint_overload=symint)
    if Variant.method in f.variants:
        return f"self.{name}"
    if Variant.function in f.variants:
        # Factory-like functions live in the torch:: namespace.
        uses_torch_ns = has_tensor_options(f) or f.func.name.name.base.endswith(
            "_like"
        )
        ns = "torch" if uses_torch_ns else "at"
        return f"{ns}::{name}"
    raise RuntimeError(f"could not dispatch, neither function nor method: {f.func}")
|
1215 |
+
|
1216 |
+
|
1217 |
+
def cpp_dispatch_exprs(
    f: NativeFunction,
    *,
    python_signature: Optional[PythonSignature] = None,
) -> Tuple[str, ...]:
    """Return the argument expressions passed to the C++ dispatch target."""
    cpp_args: Sequence[Binding] = _cpp_signature(f, method=False).arguments()

    if isinstance(python_signature, PythonSignatureDeprecated):
        # For deprecated python signatures we may need to fill in some
        # constants; 'out' is only kept for genuine out-variants.
        exprs: Tuple[str, ...] = tuple(
            e
            for e in python_signature.deprecated_args_exprs
            if e != "out" or f.func.is_out_fn()
        )
    else:
        # By default the exprs are consistent with the C++ signature.
        exprs = tuple(a.name for a in cpp_args)

    if Variant.method in f.variants:
        # 'self' is the receiver, not a call argument, for methods.
        exprs = tuple(e for e in exprs if e != "self")

    return exprs
|
1241 |
+
|
1242 |
+
|
1243 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
1244 |
+
#
|
1245 |
+
# Python / C++ Args Binding
|
1246 |
+
#
|
1247 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
1248 |
+
|
1249 |
+
|
1250 |
+
# We explicitly enumerate the PythonArgParser unpacking methods for all
|
1251 |
+
# supported types. This might be more verbose than necessary, partially
|
1252 |
+
# because of the irregularity of unpacking method naming, partially
|
1253 |
+
# because we want to mimic the old codegen behavior - to reject
|
1254 |
+
# unexpected and/or unsupported cases which the old codegen rejects.
|
1255 |
+
# For certain cases it is intentionally more restrictive than necessary,
|
1256 |
+
# e.g.: it doesn't accept doublelist with definite size.
|
1257 |
+
def arg_parser_unpack_method(
    t: Type, default: Optional[str], default_init: Optional[str], *, symint: bool = True
) -> str:
    """Return the PythonArgParser unpacking method name for schema type ``t``.

    Raises RuntimeError for types the parser cannot unpack, or when a
    default-init is supplied for a type that doesn't support it.
    """
    has_default_init = default_init is not None
    # Only a small set of types support the *WithDefault unpackers.
    if has_default_init and str(t) not in (
        "ScalarType?",
        "ScalarType",
        "Device",
        "Device?",
        "Layout",
        "Layout?",
        "bool",
        "bool?",
    ):
        # FIX: error message grammar ("does not supported" -> "does not support").
        raise RuntimeError(f"type '{t}' does not support unpacking with default")

    if isinstance(t, BaseType):
        if t.name in [
            BaseTy.Tensor,
            BaseTy.Stream,
            BaseTy.Storage,
            BaseTy.Scalar,
            BaseTy.Dimname,
        ]:
            # These unpack methods line up with their schema names
            return t.name.name.lower()
        elif t.name == BaseTy.ScalarType:
            return "scalartypeWithDefault" if has_default_init else "scalartype"
        elif t.name == BaseTy.Device:
            return "deviceWithDefault" if has_default_init else "device"
        elif t.name == BaseTy.DeviceIndex:
            return "toInt64"
        elif t.name == BaseTy.int:
            return "toInt64"
        elif t.name == BaseTy.SymInt:
            return "toSymInt" if symint else "toInt64"
        elif t.name == BaseTy.bool:
            return "toBoolWithDefault" if has_default_init else "toBool"
        elif t.name == BaseTy.float:
            return "toDouble"
        elif t.name == BaseTy.str:
            return "stringView"
        elif t.name == BaseTy.Layout:
            return "layoutWithDefault" if has_default_init else "layout"
        elif t.name == BaseTy.MemoryFormat:
            return "memoryformat"

    elif isinstance(t, OptionalType):
        if str(t.elem) == "Tensor":
            return "optionalTensor"
        elif str(t.elem) == "Generator":
            return "generator"
        elif str(t.elem) == "Dimname[]":
            return "toDimnameListOptional"
        elif not has_default_init and default in (None, "None", "c10::nullopt"):
            # If default is None: append 'Optional' to elem's unpacking method
            return (
                arg_parser_unpack_method(t.elem, None, None, symint=symint) + "Optional"
            )
        else:
            # Otherwise, load as underlying type with default
            return arg_parser_unpack_method(
                t.elem, default, default_init, symint=symint
            )

    elif isinstance(t, ListType):
        if str(t.elem) == "Tensor":
            # accept and use definite size
            return f"tensorlist_n<{t.size}>" if t.size is not None else "tensorlist"
        elif str(t.elem) == "Tensor?":
            return "list_of_optional_tensors"
        elif str(t.elem) == "Dimname":
            # accept definite size
            return "dimnamelist"
        elif str(t.elem) == "int":
            # accept definite size
            return "intlist"
        elif str(t.elem) == "float":
            return "doublelist"
        elif str(t.elem) == "SymInt":
            # accept definite size
            return "symintlist" if symint else "intlist"
        elif str(t.elem) == "Scalar":
            return "scalarlist"
    # Anything that fell through the chains above is unsupported.
    raise RuntimeError(f"type '{t}' is not supported by PythonArgParser")
|
1342 |
+
|
1343 |
+
|
1344 |
+
# Return RHS expression for python argument using PythonArgParser output.
|
1345 |
+
# e.g. for arg name 'foo', arg type 'bool', arg_index = 2, returns '_r.toBool(2)'
|
1346 |
+
def arg_parser_output_expr(
    arg_index: int, a: PythonArgument, *, symint: bool = True
) -> PythonArgParserOutputExpr:
    """Build the RHS expression for a python argument from PythonArgParser.

    e.g. for arg name 'foo', arg type 'bool', arg_index = 2,
    the expression is '_r.toBool(2)'.
    """
    unpack = arg_parser_unpack_method(
        t=a.type, default=a.default, default_init=a.default_init, symint=symint
    )
    if a.default_init is not None:
        # *WithDefault unpackers take the default-init as a second argument.
        call = f"_r.{unpack}({arg_index}, {a.default_init})"
    else:
        call = f"_r.{unpack}({arg_index})"

    return PythonArgParserOutputExpr(
        name=a.name,
        expr=call,
        index=arg_index,
        argument=a,
    )
|
1362 |
+
|
1363 |
+
|
1364 |
+
# Returns a map with key = arg_name and value = PythonArgParserOutputExpr.
|
1365 |
+
def arg_parser_output_exprs(
    ps: PythonSignature, f: NativeFunction, *, symint: bool = True
) -> Dict[str, PythonArgParserOutputExpr]:
    """Map each python arg name to its PythonArgParserOutputExpr."""
    result: Dict[str, PythonArgParserOutputExpr] = {}
    for index, arg in enumerate(ps.arguments()):
        out = arg_parser_output_expr(index, arg, symint=symint)
        result[out.name] = out
    return result
|
1373 |
+
|
1374 |
+
|
1375 |
+
# argument name to type for scattered tensor options fields
|
1376 |
+
# argument name to type for scattered tensor options fields
# Used by dispatch_lambda_exprs to validate the scattered TensorOptions
# python binding arguments (name and schema type must match exactly).
TENSOR_OPTIONS_FIELDS = {
    "dtype": "ScalarType?",
    "device": "Device?",
    "layout": "Layout?",
    "pin_memory": "bool?",
    "requires_grad": "bool?",
}
|
1383 |
+
|
1384 |
+
|
1385 |
+
# bind arg parser outputs (python args) with dispatch lambda arguments (c++ args).
|
1386 |
+
# bind arg parser outputs (python args) with dispatch lambda arguments (c++ args).
def dispatch_lambda_exprs(
    ps: PythonSignature, f: NativeFunction, *, symint: bool = True
) -> DispatchLambdaArgumentExprs:
    """Bind PythonArgParser outputs to the dispatch lambda's C++ arguments.

    Produces the per-argument call expressions and any initializer statements
    (e.g. TensorOptions packing, DimnameList rewrapping) that must precede
    the lambda call. Raises RuntimeError on malformed tensor-options setups.
    """
    # This method is to bind 'arg_parser_outputs' and 'lambda_args' by producing
    # 'inits' and 'lambda_args_exprs' for each lambda argument using arg parser
    # outputs.
    arg_parser_outputs = arg_parser_output_exprs(ps, f, symint=symint)
    lambda_args = dispatch_lambda_args(ps, f, symint=symint)
    inits: List[str] = []
    lambda_args_exprs: Dict[str, str] = {}

    has_toptions = has_tensor_options(f)

    # 1. special inits/unpacking to provide binding exprs for lambda arguments.
    for a in ps.arguments(skip_tensor_options=True):
        name = a.name
        arg_parser_expr = arg_parser_outputs[a.name].expr

        if has_toptions and name == "self":
            # TODO: why this needs to be special case?
            inits.extend(
                [
                    f"auto self = {arg_parser_expr};",
                ]
            )
            lambda_args_exprs[name] = name
        elif (
            isinstance(a, PythonOutArgument)
            and len(a.outputs) > 1
            and f.func.is_out_fn()
        ):
            # Scattered multi-output: unpack the parsed tensorlist into out[i].
            inits.extend(
                [
                    f"auto out = {arg_parser_expr};",
                ]
            )
            for i, out_arg in enumerate(a.outputs):
                lambda_args_exprs[out_arg.name] = f"out[{i}]"
        elif str(a.type) == "Dimname[]?":
            # [old codegen]
            # TODO: make this part of something more general, or get rid of it.
            # optional<ArrayRef<T>> are special. The PythonArgParser returns an
            # optional<vector<T>>, which cannot be implicitly converted to
            # optional<ArrayRef<T>>. One needs to unwrap the optional and rewrap.
            inits.extend(
                [
                    f"auto __{name} = {arg_parser_expr};",
                    f"c10::optional<DimnameList> {name} = __{name} ? c10::make_optional(DimnameList(__{name}.value())) : c10::nullopt;",  # noqa: B950
                ]
            )
            lambda_args_exprs[name] = name
        else:
            # default case - directly using PythonArgParser output expr
            lambda_args_exprs[name] = arg_parser_expr

    # method's self is passed directly to python binding, rather than parsed
    if ps.method:
        lambda_args_exprs["self"] = "self"

    # 2. special packing/checking for TensorOptions.
    tensor_options_args_names = [a.name for a in ps.tensor_options_args]
    if has_toptions:
        if f.func.is_out_fn():
            raise RuntimeError(f"{f.func}: tensor options with output arg")
        for a in ps.tensor_options_args:
            if a.name not in TENSOR_OPTIONS_FIELDS:
                raise RuntimeError(
                    f"{f.func}: unrecognized tensor options field '{a.name}' in python binding arguments"
                )
            if str(a.type) != TENSOR_OPTIONS_FIELDS.get(a.name):
                raise RuntimeError(
                    f"{f.func}: unrecognized type '{str(a.type)}' for tensor options field '{a.name}'"
                )
        if not all(
            a in tensor_options_args_names for a in TENSOR_OPTIONS_FIELDS.keys()
        ):
            raise RuntimeError(
                f"{f.func}: incomplete tensor options args: {tensor_options_args_names}"
            )

        # Pack the scattered fields into a single TensorOptions local.
        inits.append(
            f"""\
const auto options = TensorOptions()
    .dtype({arg_parser_outputs['dtype'].expr})
    .device({arg_parser_outputs['device'].expr})
    .layout({arg_parser_outputs['layout'].expr})
    .requires_grad({arg_parser_outputs['requires_grad'].expr})
    .pinned_memory({arg_parser_outputs['pin_memory'].expr});
torch::utils::maybe_initialize_device(options);
"""
        )
        lambda_args_exprs["options"] = "options"

    # 3. special case - access scattered TensorOptions fields without packing
    # TODO: maybe move to the generator side as it's not related to binding.
    if not has_toptions and tensor_options_args_names:
        if "dtype" in tensor_options_args_names:
            # we're an output-arg variant, check these args against output tensor
            if not f.func.is_out_fn():
                raise RuntimeError(
                    f"{f.func}: dtype in tensor_options_args without output arg"
                )
            if not all(a in tensor_options_args_names for a in ("layout", "device")):
                raise RuntimeError(
                    f"{f.func}: incomplete tensor options for output check"
                )

            inits.append(
                f"""\
check_out_type_matches({arg_parser_outputs['out'].expr}, {arg_parser_outputs['dtype'].expr},
                       {arg_parser_outputs['dtype'].is_none_expr}, {arg_parser_outputs['layout'].expr},
                       {arg_parser_outputs['device'].expr}, {arg_parser_outputs['device'].is_none_expr});
"""
            )
        # we'll set requires_grad on outgoing tensor
        if "requires_grad" not in tensor_options_args_names:
            raise RuntimeError(
                f'{f.func}: expected "requires_grad" in tensor_options_args absent, but found [{tensor_options_args_names}]'
            )

    return DispatchLambdaArgumentExprs(
        exprs=tuple(lambda_args_exprs[a.name] for a in lambda_args),
        inits=inits,
    )
|
venv/lib/python3.10/site-packages/torchgen/api/structured.py
ADDED
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import List, Union
|
2 |
+
|
3 |
+
from torchgen.api import cpp
|
4 |
+
|
5 |
+
from torchgen.api.types import (
|
6 |
+
ArgName,
|
7 |
+
ArrayRefCType,
|
8 |
+
BaseCType,
|
9 |
+
Binding,
|
10 |
+
ConstRefCType,
|
11 |
+
dimnameListT,
|
12 |
+
intArrayRefT,
|
13 |
+
iOptTensorListRefT,
|
14 |
+
iTensorListRefT,
|
15 |
+
NamedCType,
|
16 |
+
OptionalCType,
|
17 |
+
optionalIntArrayRefT,
|
18 |
+
optionalScalarRefT,
|
19 |
+
optionalTensorRefT,
|
20 |
+
scalarT,
|
21 |
+
tensorT,
|
22 |
+
)
|
23 |
+
from torchgen.model import (
|
24 |
+
Argument,
|
25 |
+
BaseTy,
|
26 |
+
BaseType,
|
27 |
+
ListType,
|
28 |
+
NativeFunctionsGroup,
|
29 |
+
OptionalType,
|
30 |
+
SelfArgument,
|
31 |
+
TensorOptionsArguments,
|
32 |
+
Type,
|
33 |
+
)
|
34 |
+
from torchgen.utils import assert_never
|
35 |
+
|
36 |
+
# This file describes the translation of JIT schema to the structured functions API.
|
37 |
+
# This is similar to native API, but a number of historical problems with native
|
38 |
+
# API have been fixed.
|
39 |
+
|
40 |
+
|
41 |
+
# Translation of types occurring in JIT arguments to a C++ argument type.
|
42 |
+
# NB: For now, mutable doesn't do anything; but it could if we make
|
43 |
+
# some more nominal types
|
44 |
+
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
    """Translate a JIT schema type to its structured-kernel C++ NamedCType.

    `mutable` currently has no effect (kept for interface parity with the
    other translators); `binds` names the resulting binding.
    """
    # If it's a value type, do the value type translation
    # NB: structured kernels ALWAYS have symint off, since they involve actual
    # kernels that require real ints. The one exception is the
    # CompositeExplicitAutograd and the meta function (which could
    # hypothetically be SymInt), but for simplicity we plan for these to just
    # be handled in Python
    r = cpp.valuetype_type(t, symint=False, binds=binds)
    if r is not None:
        return r

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            return NamedCType(binds, ConstRefCType(BaseCType(tensorT)))
        elif t.name == BaseTy.Scalar:
            return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
        else:
            raise AssertionError(f"base type should have been value type {t}")
    elif isinstance(t, OptionalType):
        # Optional Tensor/Scalar get dedicated lightweight ref wrappers.
        if t.elem == BaseType(BaseTy.Tensor):
            return NamedCType(binds, BaseCType(optionalTensorRefT))
        elif t.elem == BaseType(BaseTy.Scalar):
            return NamedCType(binds, BaseCType(optionalScalarRefT))
        elif isinstance(t.elem, ListType) and str(t.elem.elem) == "int":
            return NamedCType(binds, BaseCType(optionalIntArrayRefT))
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
        return NamedCType(binds, OptionalCType(elem.type))
    elif isinstance(t, ListType):
        if t.elem == BaseType(BaseTy.Tensor):
            return NamedCType(binds, ConstRefCType(BaseCType(iTensorListRefT)))
        elif t.elem == OptionalType(BaseType(BaseTy.Tensor)):
            return NamedCType(binds, BaseCType(iOptTensorListRefT))
        # TODO: delete these special cases; see torchgen.api.cpp--these
        # must be changed in tandem, but there are problems; see
        # https://github.com/pytorch/pytorch/pull/51485
        elif str(t.elem) == "int":
            return NamedCType(binds, BaseCType(intArrayRefT))
        elif str(t.elem) == "Dimname":
            return NamedCType(binds, BaseCType(dimnameListT))
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
        return NamedCType(binds, ArrayRefCType(elem.type))
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")
|
87 |
+
|
88 |
+
|
89 |
+
def argument_type(a: Argument, *, binds: ArgName) -> NamedCType:
    """Translate a single JIT Argument into its structured-kernel C++ type."""
    # Mutability is taken from the argument's write annotation.
    nctype = argumenttype_type(a.type, mutable=a.is_write, binds=binds)
    return nctype
|
91 |
+
|
92 |
+
|
93 |
+
# returns_type intentionally omitted, because structured kernels never "return";
|
94 |
+
# instead, they always indirectly report their outputs (in the case of a meta
|
95 |
+
# function, by calling set_output; in the case of an impl function, by writing
|
96 |
+
# directly into the provided out argument).
|
97 |
+
|
98 |
+
|
99 |
+
# Structured kernels are never defaulted
|
100 |
+
# Structured kernels are never defaulted
def argument(a: Union[Argument, SelfArgument, TensorOptionsArguments]) -> List[Binding]:
    """Translate one schema argument into its (possibly empty) C++ bindings."""
    if isinstance(a, SelfArgument):
        # 'self' translates exactly like its underlying argument.
        return argument(a.argument)
    if isinstance(a, TensorOptionsArguments):
        raise AssertionError("structured kernels don't support TensorOptions yet")
    if isinstance(a, Argument):
        binding = Binding(
            nctype=argument_type(a, binds=a.name),
            name=a.name,
            default=None,
            argument=a,
        )
        return [binding]
    assert_never(a)
|
116 |
+
|
117 |
+
|
118 |
+
def impl_arguments(g: NativeFunctionsGroup) -> List[Binding]:
    """Bindings for the structured impl function of a native-functions group."""
    collected: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []

    if g.out.precomputed:
        # Some non-out parameters are replaced by precomputed counterparts,
        # as specified in native_functions.yaml.
        replacements = g.out.precomputed.replace
        for a in g.out.func.arguments.non_out:
            if isinstance(a, Argument) and a.name in replacements:
                # Substitute the parameters that replace `a`.
                collected.extend(replacements[a.name])
            else:
                collected.append(a)
        # g.out.precomputed.add is the list of parameters that are added
        # without replacement after the non out args and just before the out args
        collected.extend(g.out.precomputed.add)
    else:
        collected.extend(g.out.func.arguments.non_out)

    collected.extend(g.out.func.arguments.out)
    return [b for a in collected for b in argument(a)]
|
146 |
+
|
147 |
+
|
148 |
+
def meta_arguments(g: NativeFunctionsGroup) -> List[Binding]:
    """Bindings for the meta function: the functional variant's non-out args."""
    bindings: List[Binding] = []
    for a in g.functional.func.arguments.non_out:
        bindings.extend(argument(a))
    return bindings
|
152 |
+
|
153 |
+
|
154 |
+
def out_arguments(g: NativeFunctionsGroup) -> List[Binding]:
    """Bindings for the out arguments of the group's out variant."""
    bindings: List[Binding] = []
    for a in g.out.func.arguments.out:
        bindings.extend(argument(a))
    return bindings
|
venv/lib/python3.10/site-packages/torchgen/api/translate.py
ADDED
@@ -0,0 +1,430 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Dict, List, NoReturn, Sequence, Union
|
2 |
+
|
3 |
+
from torchgen.api.types import (
|
4 |
+
ArrayRefCType,
|
5 |
+
BaseCType,
|
6 |
+
Binding,
|
7 |
+
boolT,
|
8 |
+
ConstRefCType,
|
9 |
+
deviceT,
|
10 |
+
Expr,
|
11 |
+
intArrayRefT,
|
12 |
+
iOptTensorListRefT,
|
13 |
+
layoutT,
|
14 |
+
ListCType,
|
15 |
+
longT,
|
16 |
+
memoryFormatT,
|
17 |
+
MutRefCType,
|
18 |
+
NamedCType,
|
19 |
+
opmath_t,
|
20 |
+
OptionalCType,
|
21 |
+
optionalIntArrayRefT,
|
22 |
+
optionalScalarRefT,
|
23 |
+
optionalSymIntArrayRefT,
|
24 |
+
optionalTensorRefT,
|
25 |
+
scalar_t,
|
26 |
+
scalarT,
|
27 |
+
scalarTypeT,
|
28 |
+
SpecialArgName,
|
29 |
+
symIntArrayRefT,
|
30 |
+
SymIntT,
|
31 |
+
tensorOptionsT,
|
32 |
+
tensorT,
|
33 |
+
VectorCType,
|
34 |
+
)
|
35 |
+
|
36 |
+
# This file implements a small program synthesis engine that implements
|
37 |
+
# conversions between one API to another.
|
38 |
+
#
|
39 |
+
# The key data type in this file in NamedCType, short for Named C++ semantic type. A NamedCType
|
40 |
+
# represents a C++ type, plus semantic information about what it represents.
|
41 |
+
# For example, consider the argument "bool pin_memory"; its normal C++ type is
|
42 |
+
# "bool", but its C++ semantic type also keeps track that this represents a
|
43 |
+
# "pin_memory"; you can't just use a random other boolean in a context where you
|
44 |
+
# need a "pin_memory"!
|
45 |
+
#
|
46 |
+
# The translator takes a list of needed NamedCTypes, and then figures out how
|
47 |
+
# to construct expressions with these NamedCTypes from the given bindings. Many
|
48 |
+
# of these expressions are trivial (I need a Tensor other; there's a Tensor
|
49 |
+
# other scope); others are more nontrivial and may require packing/unpacking.
|
50 |
+
# Some examples of non-trivial action:
|
51 |
+
#
|
52 |
+
# - Need the "dtype" binding? Well, maybe "dtype" isn't available
|
53 |
+
# in the context, instead, "options" is, and you need to extract
|
54 |
+
# it from there. (Gather)
|
55 |
+
#
|
56 |
+
# - Need the "context" binding? Well, maybe "context" isn't available
|
57 |
+
# in the context, and you need to construct it from "dtype", "device",
|
58 |
+
# etc. (Scatter)
|
59 |
+
#
|
60 |
+
# - Need the "memory_format" binding? Well, actually, it's available
|
61 |
+
# from both "memory_format" and "options", so you had better make sure
|
62 |
+
# they are consistent. (Join)
|
63 |
+
|
64 |
+
options_ctype = NamedCType("options", ConstRefCType(BaseCType(tensorOptionsT)))
|
65 |
+
|
66 |
+
out_tensor_ctype = NamedCType("out", ConstRefCType(BaseCType(tensorT)))
|
67 |
+
|
68 |
+
longVec_ctype = VectorCType(BaseCType(longT))
|
69 |
+
longSymVec_ctype = VectorCType(BaseCType(SymIntT))
|
70 |
+
optionalLongVec_ctype = OptionalCType(VectorCType(BaseCType(longT)))
|
71 |
+
optionalScalar_ctype = OptionalCType(BaseCType(scalarT))
|
72 |
+
optionalTensor_ctype = OptionalCType(BaseCType(tensorT))
|
73 |
+
|
74 |
+
|
75 |
+
class UnsatError(RuntimeError):
|
76 |
+
pass
|
77 |
+
|
78 |
+
|
79 |
+
# Given a set of in-scope bindings and a set of target bindings, synthesize
|
80 |
+
# a list of expressions that uses only the in-scope bindings (bindings) that
|
81 |
+
# have all of the types of goals. You may want to use this function if
|
82 |
+
# you're generating code for a function like:
|
83 |
+
#
|
84 |
+
# void f({args}) {
|
85 |
+
# g({exprs}); // g is a different API
|
86 |
+
# }
|
87 |
+
#
|
88 |
+
# and you need to generate "exprs".
|
89 |
+
#
|
90 |
+
# Typically, a list of Bindings is convenient to get (you usually call something
|
91 |
+
# like arguments() to get them); but technically you only need less information:
|
92 |
+
# for 'bindings' an (un-ordered) list of Exprs is sufficient; similarly, for
|
93 |
+
# 'goals', an (ordered) list of NamedCType goals is sufficient. If you are doing
|
94 |
+
# something more complicated, e.g., tracking the set of bindings in a context,
|
95 |
+
# you may find using these smaller types more convenient.
|
96 |
+
def translate(
|
97 |
+
bindings: Sequence[Union[Expr, Binding]],
|
98 |
+
goals: Sequence[Union[NamedCType, Binding]],
|
99 |
+
*,
|
100 |
+
method: bool = False,
|
101 |
+
allow_expensive_conversions: bool = False,
|
102 |
+
) -> List[Expr]:
|
103 |
+
binding_exprs: List[Expr] = []
|
104 |
+
for b in bindings:
|
105 |
+
if isinstance(b, Binding):
|
106 |
+
binding_exprs.append(
|
107 |
+
Expr(
|
108 |
+
expr=b.name,
|
109 |
+
type=b.nctype,
|
110 |
+
)
|
111 |
+
)
|
112 |
+
else:
|
113 |
+
binding_exprs.append(b)
|
114 |
+
|
115 |
+
goal_ctypes: List[NamedCType] = []
|
116 |
+
for g in goals:
|
117 |
+
if isinstance(g, Binding):
|
118 |
+
goal_ctypes.append(g.nctype)
|
119 |
+
else:
|
120 |
+
goal_ctypes.append(g)
|
121 |
+
|
122 |
+
# Add all the bindings to the context
|
123 |
+
ctx: Dict[NamedCType, str] = {}
|
124 |
+
for b in binding_exprs:
|
125 |
+
ctx[b.type] = b.expr
|
126 |
+
|
127 |
+
# While we're at it, do some simple forward inference, looking through
|
128 |
+
# constructors.
|
129 |
+
#
|
130 |
+
# NB: When should you do forward inference versus backward inference?
|
131 |
+
# The general idea:
|
132 |
+
#
|
133 |
+
# - Backward inference WHEN the goal gets smaller
|
134 |
+
# - Forward inference WHEN the hypothesis gets smaller
|
135 |
+
#
|
136 |
+
# This helps ensure termination: backward inference starts with a goal
|
137 |
+
# and tries to make it simpler and simpler until it's trivial; if the
|
138 |
+
# goal can grow in size, we blow up to a really huge goal size.
|
139 |
+
# Similarly, with forward inference we take hypotheses and decompose
|
140 |
+
# them into simpler hypotheses; if hypotheses could expand in size,
|
141 |
+
# we also have potential nontermination. (In the code below, forward
|
142 |
+
# inference is only ever carried out at a single step, but you could
|
143 |
+
# imagine repeated application of forward inference being profitable.)
|
144 |
+
#
|
145 |
+
# A good starting point in the literature for exploring more about proof
|
146 |
+
# search are these lecture notes
|
147 |
+
# https://www.cs.cmu.edu/~fp/courses/oregon-m10/04-focusing.pdf
|
148 |
+
#
|
149 |
+
# TODO: My kingdom for a pattern matcher
|
150 |
+
# https://www.python.org/dev/peps/pep-0634/
|
151 |
+
#
|
152 |
+
# TODO: This could get us in recomputation trouble if b.expr is nontrivial.
|
153 |
+
# Fix this by implementing some sort of sharing so that if multiple
|
154 |
+
# goals share the same expression, we only compute it once. This seems
|
155 |
+
# to matter in practice as compiler is often unwilling to CSE nontrivial
|
156 |
+
# expressions like scalar.to<scalar_t>()
|
157 |
+
t = b.type
|
158 |
+
if (
|
159 |
+
isinstance(t, ConstRefCType)
|
160 |
+
and isinstance(t.elem, OptionalCType)
|
161 |
+
and isinstance(t.elem.elem, BaseCType)
|
162 |
+
and str(t.elem.elem.type) == "at::Tensor"
|
163 |
+
):
|
164 |
+
ctx[
|
165 |
+
NamedCType(t.elem.elem.name, ConstRefCType(BaseCType(tensorT)))
|
166 |
+
] = f"({b.expr}.has_value() ? *{b.expr} : at::Tensor())"
|
167 |
+
|
168 |
+
if t.type == ConstRefCType(OptionalCType(BaseCType(tensorT))):
|
169 |
+
ctx[
|
170 |
+
NamedCType(t.name, BaseCType(optionalTensorRefT))
|
171 |
+
] = f"(({b.expr}.has_value() && (*{b.expr}).defined()) ? at::OptionalTensorRef(*{b.expr}) : at::OptionalTensorRef())"
|
172 |
+
|
173 |
+
if t.type == ConstRefCType(BaseCType(scalarT)):
|
174 |
+
ctx[NamedCType(t.name, BaseCType(opmath_t))] = f"({b.expr}).to<opmath_t>()"
|
175 |
+
|
176 |
+
if t.type == ConstRefCType(OptionalCType(BaseCType(scalarT))):
|
177 |
+
ctx[
|
178 |
+
NamedCType(t.name, BaseCType(optionalScalarRefT))
|
179 |
+
] = f"({b.expr}.has_value() ? at::OptionalScalarRef(&({b.expr}.value())) : at::OptionalScalarRef())"
|
180 |
+
|
181 |
+
if t.type == BaseCType(scalar_t):
|
182 |
+
ctx[
|
183 |
+
NamedCType(t.name, BaseCType(opmath_t))
|
184 |
+
] = f"static_cast<opmath_t>({b.expr})"
|
185 |
+
|
186 |
+
# [Note: IOptTensorListRef]
|
187 |
+
if t.type == ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT)))):
|
188 |
+
ctx[
|
189 |
+
NamedCType(t.name, BaseCType(iOptTensorListRefT))
|
190 |
+
] = f"at::IOptTensorListRef({b.expr})"
|
191 |
+
|
192 |
+
# Add implicit bindings if the generated code is inside a Tensor method
|
193 |
+
if method:
|
194 |
+
ctx[
|
195 |
+
NamedCType("self", MutRefCType(BaseCType(tensorT)))
|
196 |
+
] = "const_cast<Tensor&>(*this)"
|
197 |
+
ctx[
|
198 |
+
NamedCType("self", ConstRefCType(BaseCType(tensorT)))
|
199 |
+
] = "const_cast<Tensor&>(*this)"
|
200 |
+
# This is better! Byte-for-byte compat
|
201 |
+
# ctx[NamedCType("self", ConstRefCType(BaseCType(tensorT)))] = "*this"
|
202 |
+
|
203 |
+
def unsat(goal: NamedCType) -> NoReturn:
|
204 |
+
ctx_desc = "\n".join(
|
205 |
+
f" {t.cpp_type()} {t.name}; // {e}" for t, e in ctx.items()
|
206 |
+
)
|
207 |
+
raise UnsatError(
|
208 |
+
f"""
|
209 |
+
Failed to synthesize the expression "{goal.cpp_type()} {goal.name}".
|
210 |
+
When I failed, the following bindings were available in the context:
|
211 |
+
|
212 |
+
{ctx_desc}
|
213 |
+
|
214 |
+
This probably means there is a missing rule in the rules of torchgen.api.translate.
|
215 |
+
Check this module for more information.
|
216 |
+
"""
|
217 |
+
)
|
218 |
+
|
219 |
+
# A shitty backtracking search implementation. It's shitty because it
|
220 |
+
# does backtracking via stack (bad idea!) and for the most part tries to
|
221 |
+
# avoid backtracking. In particular, if
|
222 |
+
# direct=True, we won't try to do any fancy synthesis, just trivial
|
223 |
+
# conversions (e.g., "T a" is OK for "const T& a"). So all of the
|
224 |
+
# existing rules in this function simply try to solve immediately,
|
225 |
+
# and bail if things don't work out.
|
226 |
+
def solve(goal: NamedCType, *, direct: bool) -> str:
|
227 |
+
def direct_solve(goal: NamedCType) -> str:
|
228 |
+
return solve(goal, direct=True)
|
229 |
+
|
230 |
+
if goal in ctx:
|
231 |
+
# Trivial
|
232 |
+
return ctx[goal]
|
233 |
+
|
234 |
+
# const & is satisfied with mutable &
|
235 |
+
if isinstance(goal.type, ConstRefCType):
|
236 |
+
try:
|
237 |
+
# WARNING: not strictly decreasing; be careful not
|
238 |
+
# to add a direct conversion that goes satisfies
|
239 |
+
# mutable& with const&
|
240 |
+
return solve(
|
241 |
+
NamedCType(goal.name, MutRefCType(goal.type.elem)), direct=direct
|
242 |
+
)
|
243 |
+
except UnsatError:
|
244 |
+
pass
|
245 |
+
|
246 |
+
# mutable & is satisfied with value
|
247 |
+
if isinstance(goal.type, MutRefCType):
|
248 |
+
try:
|
249 |
+
return solve(NamedCType(goal.name, goal.type.elem), direct=direct)
|
250 |
+
except UnsatError:
|
251 |
+
pass
|
252 |
+
|
253 |
+
# TODO: These are referentially equal, shouldn't have to do this;
|
254 |
+
# ensuring we don't use type synonym IntArrayRef in codegen would
|
255 |
+
# help
|
256 |
+
if goal.type == ArrayRefCType(BaseCType(longT)):
|
257 |
+
return solve(NamedCType(goal.name, BaseCType(intArrayRefT)), direct=direct)
|
258 |
+
|
259 |
+
if direct:
|
260 |
+
unsat(goal)
|
261 |
+
|
262 |
+
# For now, all of these rules are mutually exclusive.
|
263 |
+
if goal == NamedCType("memory_format", OptionalCType(BaseCType(memoryFormatT))):
|
264 |
+
memory_format = direct_solve(
|
265 |
+
NamedCType(
|
266 |
+
SpecialArgName.possibly_redundant_memory_format,
|
267 |
+
OptionalCType(BaseCType(memoryFormatT)),
|
268 |
+
)
|
269 |
+
)
|
270 |
+
# No need to join "memory_format" and "options" if the target API takes "options" directly.
|
271 |
+
# Otherwise it will cause the redundant memory_format error.
|
272 |
+
if options_ctype in goal_ctypes:
|
273 |
+
return memory_format
|
274 |
+
try:
|
275 |
+
options = direct_solve(options_ctype)
|
276 |
+
return f"c10::impl::check_tensor_options_and_extract_memory_format({options}, {memory_format})"
|
277 |
+
except UnsatError:
|
278 |
+
return memory_format
|
279 |
+
elif goal == NamedCType("options", BaseCType(tensorOptionsT)):
|
280 |
+
dtype = direct_solve(
|
281 |
+
NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT)))
|
282 |
+
)
|
283 |
+
pin_memory = direct_solve(
|
284 |
+
NamedCType("pin_memory", OptionalCType(BaseCType(boolT)))
|
285 |
+
)
|
286 |
+
device = direct_solve(
|
287 |
+
NamedCType("device", OptionalCType(BaseCType(deviceT)))
|
288 |
+
)
|
289 |
+
layout = direct_solve(
|
290 |
+
NamedCType("layout", OptionalCType(BaseCType(layoutT)))
|
291 |
+
)
|
292 |
+
return f"TensorOptions().dtype({dtype}).layout({layout}).device({device}).pinned_memory({pin_memory})"
|
293 |
+
|
294 |
+
elif goal == NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))):
|
295 |
+
try:
|
296 |
+
options = direct_solve(options_ctype)
|
297 |
+
return f"c10::optTypeMetaToScalarType({options}.dtype_opt())"
|
298 |
+
except UnsatError:
|
299 |
+
out_tensor = direct_solve(out_tensor_ctype)
|
300 |
+
return f"{out_tensor}.scalar_type()"
|
301 |
+
|
302 |
+
elif goal == NamedCType("layout", OptionalCType(BaseCType(layoutT))):
|
303 |
+
try:
|
304 |
+
options = direct_solve(options_ctype)
|
305 |
+
return f"{options}.layout_opt()"
|
306 |
+
except UnsatError:
|
307 |
+
out_tensor = direct_solve(out_tensor_ctype)
|
308 |
+
return f"{out_tensor}.layout()"
|
309 |
+
|
310 |
+
elif goal == NamedCType("device", OptionalCType(BaseCType(deviceT))):
|
311 |
+
try:
|
312 |
+
options = direct_solve(options_ctype)
|
313 |
+
return f"{options}.device_opt()"
|
314 |
+
except UnsatError:
|
315 |
+
out_tensor = direct_solve(out_tensor_ctype)
|
316 |
+
return f"{out_tensor}.device()"
|
317 |
+
|
318 |
+
elif goal == NamedCType("pin_memory", OptionalCType(BaseCType(boolT))):
|
319 |
+
try:
|
320 |
+
options = direct_solve(options_ctype)
|
321 |
+
return f"{options}.pinned_memory_opt()"
|
322 |
+
except UnsatError:
|
323 |
+
# If we're calling a factory op from its out= variant,
|
324 |
+
# We don't actually care about the value of pin_memory.
|
325 |
+
out_tensor = direct_solve(out_tensor_ctype)
|
326 |
+
return "c10::nullopt"
|
327 |
+
|
328 |
+
# We can always do translations from value types to reference types, like vector<int> -> IntArrayRef
|
329 |
+
elif goal.type == BaseCType(intArrayRefT):
|
330 |
+
try:
|
331 |
+
return direct_solve(NamedCType(goal.name, longVec_ctype))
|
332 |
+
except UnsatError:
|
333 |
+
# We can also go SymIntArrayRef -> IntArrayRef
|
334 |
+
symIntArrayRef_type = direct_solve(
|
335 |
+
NamedCType(goal.name, BaseCType(symIntArrayRefT))
|
336 |
+
)
|
337 |
+
return f"C10_AS_INTARRAYREF_SLOW({symIntArrayRef_type})"
|
338 |
+
elif goal.type == BaseCType(symIntArrayRefT):
|
339 |
+
try:
|
340 |
+
r = direct_solve(NamedCType(goal.name, BaseCType(intArrayRefT)))
|
341 |
+
return f"c10::fromIntArrayRefSlow({r})"
|
342 |
+
except UnsatError:
|
343 |
+
return direct_solve(NamedCType(goal.name, longSymVec_ctype))
|
344 |
+
elif goal.type == BaseCType(SymIntT):
|
345 |
+
return direct_solve(NamedCType(goal.name, BaseCType(longT)))
|
346 |
+
elif goal.type == OptionalCType(BaseCType(SymIntT)):
|
347 |
+
argname = direct_solve(
|
348 |
+
NamedCType(goal.name, OptionalCType(BaseCType(longT)))
|
349 |
+
)
|
350 |
+
return f"{argname}.has_value() ? c10::make_optional(c10::SymInt(*{argname})) : c10::nullopt"
|
351 |
+
elif goal.type == BaseCType(longT):
|
352 |
+
symInt_type = direct_solve(NamedCType(goal.name, BaseCType(SymIntT)))
|
353 |
+
return f"{symInt_type}.guard_int(__FILE__, __LINE__)"
|
354 |
+
elif goal.type == OptionalCType(BaseCType(longT)):
|
355 |
+
argname = direct_solve(
|
356 |
+
NamedCType(goal.name, OptionalCType(BaseCType(SymIntT)))
|
357 |
+
)
|
358 |
+
return f"{argname}.has_value() ? c10::make_optional({argname}->guard_int(__FILE__, __LINE__)) : c10::nullopt"
|
359 |
+
elif goal.type == BaseCType(optionalIntArrayRefT):
|
360 |
+
try:
|
361 |
+
return direct_solve(NamedCType(goal.name, optionalLongVec_ctype))
|
362 |
+
except UnsatError:
|
363 |
+
argname = direct_solve(
|
364 |
+
NamedCType(goal.name, BaseCType(optionalSymIntArrayRefT))
|
365 |
+
)
|
366 |
+
return f"{argname}.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*{argname})) : c10::nullopt"
|
367 |
+
elif goal.type == BaseCType(optionalSymIntArrayRefT):
|
368 |
+
# TODO: You might also want to solve this from longSymVec_ctype or
|
369 |
+
# an optional version of it
|
370 |
+
argname = direct_solve(
|
371 |
+
NamedCType(goal.name, BaseCType(optionalIntArrayRefT))
|
372 |
+
)
|
373 |
+
return f"{argname}.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*{argname})) : c10::nullopt"
|
374 |
+
elif goal.type == BaseCType(optionalScalarRefT):
|
375 |
+
return direct_solve(NamedCType(goal.name, optionalScalar_ctype))
|
376 |
+
elif goal.type == BaseCType(optionalTensorRefT):
|
377 |
+
return direct_solve(NamedCType(goal.name, optionalTensor_ctype))
|
378 |
+
|
379 |
+
# Note [translation from C++ reference to value types]
|
380 |
+
# The below cases are all for when we have an argument with a reference type,
|
381 |
+
# and a corresponding goal with a value type.
|
382 |
+
# These are needed when we populate the inputs to a lambda capture and we need
|
383 |
+
# to guarantee the lifetime of each captured argument.
|
384 |
+
# We guard it with an explicit kwarg because converting to a value type is expensive
|
385 |
+
# (O(n)) to convert from IntArrayRef to vector<int>),
|
386 |
+
# so the caller of translate() should be explicit that they need it.
|
387 |
+
if allow_expensive_conversions:
|
388 |
+
if goal.type == VectorCType(BaseCType(longT)):
|
389 |
+
intArrayRef_ctype = NamedCType(goal.name, BaseCType(intArrayRefT))
|
390 |
+
argname = direct_solve(intArrayRef_ctype)
|
391 |
+
return f"{argname}.vec()"
|
392 |
+
if goal.type == VectorCType(BaseCType(SymIntT)):
|
393 |
+
symIntArrayRef_ctype = NamedCType(goal.name, BaseCType(symIntArrayRefT))
|
394 |
+
argname = direct_solve(symIntArrayRef_ctype)
|
395 |
+
return f"{argname}.vec()"
|
396 |
+
elif goal.type == OptionalCType(VectorCType(BaseCType(longT))):
|
397 |
+
optionalIntArrayRef_ctype = NamedCType(
|
398 |
+
goal.name, BaseCType(optionalIntArrayRefT)
|
399 |
+
)
|
400 |
+
argname = direct_solve(optionalIntArrayRef_ctype)
|
401 |
+
return f"{argname}.has_value() ? c10::make_optional({argname}->vec()) : c10::nullopt"
|
402 |
+
elif goal.type == OptionalCType(BaseCType(scalarT)):
|
403 |
+
optionalScalarRef_ctype = NamedCType(
|
404 |
+
goal.name, BaseCType(optionalScalarRefT)
|
405 |
+
)
|
406 |
+
argname = direct_solve(optionalScalarRef_ctype)
|
407 |
+
return f"{argname}.has_value() ? c10::make_optional({argname}) : c10::nullopt"
|
408 |
+
elif goal.type == OptionalCType(BaseCType(scalarT)):
|
409 |
+
optionalTensorRef_ctype = NamedCType(
|
410 |
+
goal.name, BaseCType(optionalTensorRefT)
|
411 |
+
)
|
412 |
+
argname = direct_solve(optionalTensorRef_ctype)
|
413 |
+
return f"{argname}.has_value() ? c10::make_optional({argname}) : c10::nullopt"
|
414 |
+
# Technically, we also need to handle cases of C++ containers holding reference types.
|
415 |
+
# But there currently aren't any ops that require lambda capture codegen
|
416 |
+
# With arguments like std::vector<IntArrayRef>.
|
417 |
+
# If that changes, we'll have to add the translation here.
|
418 |
+
|
419 |
+
# We allow const casting on tensors, since const-correctness is a bit broken for at::Tensor.
|
420 |
+
# We could probably generalize this to non-tensor types too.
|
421 |
+
if goal.type == MutRefCType(BaseCType(tensorT)):
|
422 |
+
const_ref_tensor_ctype = NamedCType(
|
423 |
+
goal.name, ConstRefCType(BaseCType(tensorT))
|
424 |
+
)
|
425 |
+
argname = direct_solve(const_ref_tensor_ctype)
|
426 |
+
return f"const_cast<Tensor&>({argname})"
|
427 |
+
|
428 |
+
unsat(goal)
|
429 |
+
|
430 |
+
return [Expr(solve(g, direct=False), g) for g in goal_ctypes]
|
venv/lib/python3.10/site-packages/torchgen/api/types/__init__.py
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
from .types import *
|
2 |
+
from .types_base import *
|
3 |
+
from .signatures import * # isort:skip
|
venv/lib/python3.10/site-packages/torchgen/api/types/__pycache__/types.cpython-310.pyc
ADDED
Binary file (6.18 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/api/types/__pycache__/types_base.cpython-310.pyc
ADDED
Binary file (9.74 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/api/types/signatures.py
ADDED
@@ -0,0 +1,423 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from dataclasses import dataclass
|
2 |
+
|
3 |
+
from typing import Iterator, List, Optional, Sequence, Set, Tuple, Union
|
4 |
+
|
5 |
+
from torchgen.model import (
|
6 |
+
BackendIndex,
|
7 |
+
FunctionSchema,
|
8 |
+
NativeFunction,
|
9 |
+
NativeFunctionsGroup,
|
10 |
+
NativeFunctionsViewGroup,
|
11 |
+
)
|
12 |
+
|
13 |
+
from .types_base import Binding, CType, Expr
|
14 |
+
|
15 |
+
|
16 |
+
@dataclass(frozen=True)
|
17 |
+
class CppSignature:
|
18 |
+
"""
|
19 |
+
A CppSignature represents a single overload in the C++ API. For
|
20 |
+
any given function schema, there may be multiple CppSignatures
|
21 |
+
corresponding to it, based on how we desugar to C++. See also
|
22 |
+
CppSignatureGroup.
|
23 |
+
"""
|
24 |
+
|
25 |
+
# The schema this signature is derived from
|
26 |
+
func: FunctionSchema
|
27 |
+
|
28 |
+
# Is this a C++ signature for a method, i.e. Tensor::my_op(...)?
|
29 |
+
method: bool
|
30 |
+
|
31 |
+
# Is this a faithful C++ signature (i.e. following the JIT schema) or a convenience API
|
32 |
+
# (i.e. with a potential TensorOptions argument and out arguments in the front)
|
33 |
+
faithful: bool
|
34 |
+
|
35 |
+
# Is this a symint C++ signature. For BC reasons, functions that take
|
36 |
+
# SymInts still present as int64_t in C++, and the SymInt variant is
|
37 |
+
# offered at a different overload name
|
38 |
+
#
|
39 |
+
# NB: If a function RETURNS a SymInt, this is ALWAYS false
|
40 |
+
symint: bool
|
41 |
+
|
42 |
+
# The set of C++ arguments which should not have defaults applied to them
|
43 |
+
cpp_no_default_args: Set[str]
|
44 |
+
|
45 |
+
# Is this a fallback C++ binding? Fallback bindings are enabled by
|
46 |
+
# manual_cpp_binding: True and are alternate, non-public API that
|
47 |
+
# lets manual C++ binding implementors access the binding that would
|
48 |
+
# have been automatically generated
|
49 |
+
fallback_binding: bool = False
|
50 |
+
|
51 |
+
# Return the unpacked argument structure of this signature,
|
52 |
+
# discarding information about which arguments are semantically
|
53 |
+
# related to each other.
|
54 |
+
def arguments(self) -> Sequence[Binding]:
|
55 |
+
return cpp.arguments(
|
56 |
+
self.func.arguments,
|
57 |
+
faithful=self.faithful,
|
58 |
+
symint=self.symint,
|
59 |
+
method=self.method,
|
60 |
+
cpp_no_default_args=self.cpp_no_default_args,
|
61 |
+
)
|
62 |
+
|
63 |
+
def name(self, *, suppress_symint_suffix: bool = False) -> str:
|
64 |
+
n = cpp.name(
|
65 |
+
self.func,
|
66 |
+
faithful_name_for_out_overloads=self.faithful,
|
67 |
+
symint_overload=False if suppress_symint_suffix else self.symint,
|
68 |
+
)
|
69 |
+
if self.fallback_binding:
|
70 |
+
n = f"__dispatch_{n}"
|
71 |
+
return n
|
72 |
+
|
73 |
+
# Render the C++ declaration for this signature
|
74 |
+
def decl(
|
75 |
+
self,
|
76 |
+
*,
|
77 |
+
name: Optional[str] = None,
|
78 |
+
prefix: str = "",
|
79 |
+
is_redispatching_fn: bool = False,
|
80 |
+
suppress_symint_suffix: bool = False,
|
81 |
+
) -> str:
|
82 |
+
returns_type = cpp.returns_type(
|
83 |
+
self.func.returns, symint=self.symint
|
84 |
+
).cpp_type()
|
85 |
+
cpp_args = [a.decl() for a in self.arguments()]
|
86 |
+
if is_redispatching_fn:
|
87 |
+
cpp_args = ["c10::DispatchKeySet dispatchKeySet"] + cpp_args
|
88 |
+
cpp_args_str = ", ".join(cpp_args)
|
89 |
+
if name is None:
|
90 |
+
name = prefix + self.name(suppress_symint_suffix=suppress_symint_suffix)
|
91 |
+
return f"{returns_type} {name}({cpp_args_str})"
|
92 |
+
|
93 |
+
# Render the C++ definition for this signature, not including
|
94 |
+
# the body (with curly braces)
|
95 |
+
def defn(
|
96 |
+
self,
|
97 |
+
*,
|
98 |
+
name: Optional[str] = None,
|
99 |
+
prefix: str = "",
|
100 |
+
is_redispatching_fn: bool = False,
|
101 |
+
) -> str:
|
102 |
+
returns_type = cpp.returns_type(
|
103 |
+
self.func.returns, symint=self.symint
|
104 |
+
).cpp_type()
|
105 |
+
cpp_args = [a.defn() for a in self.arguments()]
|
106 |
+
if is_redispatching_fn:
|
107 |
+
cpp_args = ["c10::DispatchKeySet dispatchKeySet"] + cpp_args
|
108 |
+
cpp_args_str = ", ".join(cpp_args)
|
109 |
+
if name is None:
|
110 |
+
name = prefix + self.name()
|
111 |
+
return f"{returns_type} {name}({cpp_args_str})"
|
112 |
+
|
113 |
+
def ptr_type(self) -> str:
|
114 |
+
args_types_str = ", ".join(a.type for a in self.arguments())
|
115 |
+
return f"{cpp.returns_type(self.func.returns, symint=self.symint).cpp_type()} (*)({args_types_str})"
|
116 |
+
|
117 |
+
# Return the C++ function type, e.g., something like int(bool)
|
118 |
+
def type(self) -> str:
|
119 |
+
args_types_str = ", ".join(a.type for a in self.arguments())
|
120 |
+
return f"{cpp.returns_type(self.func.returns, symint=self.symint).cpp_type()} ({args_types_str})"
|
121 |
+
|
122 |
+
|
123 |
+
# Represents group of all CppSignatures associated with a
|
124 |
+
# FunctionSchema. Right now, that's the regular, user-visible
|
125 |
+
# signature, as well as a "faithful" signature which doesn't
|
126 |
+
# have grouping.
|
127 |
+
@dataclass(frozen=True)
|
128 |
+
class CppSignatureGroup:
|
129 |
+
func: FunctionSchema
|
130 |
+
signature: CppSignature
|
131 |
+
faithful_signature: Optional[CppSignature]
|
132 |
+
symint_signature: Optional[CppSignature]
|
133 |
+
symint_faithful_signature: Optional[CppSignature]
|
134 |
+
|
135 |
+
def most_faithful_signature(self) -> CppSignature:
|
136 |
+
if self.faithful_signature:
|
137 |
+
return self.faithful_signature
|
138 |
+
else:
|
139 |
+
return self.signature
|
140 |
+
|
141 |
+
def signatures(self, *, symint: bool = True) -> Iterator[CppSignature]:
|
142 |
+
yield self.signature
|
143 |
+
if self.faithful_signature:
|
144 |
+
yield self.faithful_signature
|
145 |
+
if symint:
|
146 |
+
if self.symint_signature:
|
147 |
+
yield self.symint_signature
|
148 |
+
if self.symint_faithful_signature:
|
149 |
+
yield self.symint_faithful_signature
|
150 |
+
|
151 |
+
@staticmethod
|
152 |
+
def from_native_function(
|
153 |
+
f: NativeFunction, *, method: bool, fallback_binding: bool = False
|
154 |
+
) -> "CppSignatureGroup":
|
155 |
+
func = f.func
|
156 |
+
|
157 |
+
def make_sig(*, faithful: bool, symint: bool) -> CppSignature:
|
158 |
+
return CppSignature(
|
159 |
+
func=func,
|
160 |
+
faithful=faithful,
|
161 |
+
symint=symint,
|
162 |
+
method=method,
|
163 |
+
fallback_binding=fallback_binding,
|
164 |
+
cpp_no_default_args=f.cpp_no_default_args,
|
165 |
+
)
|
166 |
+
|
167 |
+
def make_sigs(*, symint: bool) -> Tuple[CppSignature, Optional[CppSignature]]:
|
168 |
+
faithful_signature: Optional[CppSignature] = None
|
169 |
+
if func.arguments.tensor_options is not None or len(func.arguments.out) > 0:
|
170 |
+
faithful_signature = make_sig(faithful=True, symint=symint)
|
171 |
+
signature = make_sig(faithful=False, symint=symint)
|
172 |
+
return signature, faithful_signature
|
173 |
+
|
174 |
+
signature, faithful_signature = make_sigs(symint=False)
|
175 |
+
symint_signature: Optional[CppSignature] = None
|
176 |
+
symint_faithful_signature: Optional[CppSignature] = None
|
177 |
+
if func.has_symint():
|
178 |
+
symint_signature, symint_faithful_signature = make_sigs(symint=True)
|
179 |
+
|
180 |
+
return CppSignatureGroup(
|
181 |
+
func=func,
|
182 |
+
signature=signature,
|
183 |
+
faithful_signature=faithful_signature,
|
184 |
+
symint_signature=symint_signature,
|
185 |
+
symint_faithful_signature=symint_faithful_signature,
|
186 |
+
)
|
187 |
+
|
188 |
+
|
189 |
+
@dataclass(frozen=True)
|
190 |
+
class DispatcherSignature:
|
191 |
+
# The schema this signature is derived from
|
192 |
+
func: FunctionSchema
|
193 |
+
|
194 |
+
# Allows you to prepend an arbitrary prefix to the signature name.
|
195 |
+
# This is useful for parts of the codegen that generate wrappers around kernels,
|
196 |
+
# and need to avoid naming collisions.
|
197 |
+
prefix: str = ""
|
198 |
+
|
199 |
+
symint: bool = True
|
200 |
+
|
201 |
+
def arguments(self) -> List[Binding]:
|
202 |
+
return dispatcher.arguments(self.func, symint=self.symint)
|
203 |
+
|
204 |
+
def name(self) -> str:
|
205 |
+
return self.prefix + dispatcher.name(self.func)
|
206 |
+
|
207 |
+
def decl(self, name: Optional[str] = None) -> str:
|
208 |
+
args_str = ", ".join(a.decl() for a in self.arguments())
|
209 |
+
if name is None:
|
210 |
+
name = self.name()
|
211 |
+
return f"{self.returns_type().cpp_type()} {name}({args_str})"
|
212 |
+
|
213 |
+
def defn(
|
214 |
+
self, name: Optional[str] = None, *, is_redispatching_fn: bool = False
|
215 |
+
) -> str:
|
216 |
+
args = [a.defn() for a in self.arguments()]
|
217 |
+
if is_redispatching_fn:
|
218 |
+
args = ["c10::DispatchKeySet dispatchKeySet"] + args
|
219 |
+
args_str = ", ".join(args)
|
220 |
+
if name is None:
|
221 |
+
name = self.name()
|
222 |
+
return f"{self.returns_type().cpp_type()} {name}({args_str})"
|
223 |
+
|
224 |
+
def exprs(self) -> List[Expr]:
|
225 |
+
return [Expr(a.name, a.nctype) for a in self.arguments()]
|
226 |
+
|
227 |
+
def returns_type(self) -> CType:
|
228 |
+
return dispatcher.returns_type(self.func.returns, symint=self.symint)
|
229 |
+
|
230 |
+
def ptr_type(self) -> str:
|
231 |
+
dispatcher_args_types_str = ", ".join(a.type for a in self.arguments())
|
232 |
+
return f"{self.returns_type().cpp_type()} (*)({dispatcher_args_types_str})"
|
233 |
+
|
234 |
+
# Return the C++ function type, e.g., something like int(bool)
|
235 |
+
def type(self) -> str:
|
236 |
+
dispatcher_args_types_str = ", ".join(a.type for a in self.arguments())
|
237 |
+
return f"{self.returns_type().cpp_type()} ({dispatcher_args_types_str})"
|
238 |
+
|
239 |
+
@staticmethod
|
240 |
+
def from_schema(
|
241 |
+
func: FunctionSchema, *, prefix: str = "", symint: bool = True
|
242 |
+
) -> "DispatcherSignature":
|
243 |
+
return DispatcherSignature(func, prefix, symint)
|
244 |
+
|
245 |
+
|
246 |
+
@dataclass(frozen=True)
class NativeSignature:
    """Signature of a kernel following the in-tree "native" API convention."""

    # The schema this signature is derived from
    func: FunctionSchema

    # Forwarded to the native.* helpers; selects SymInt-aware vs plain-int
    # signatures (see native.py for the exact semantics).
    symint: bool

    prefix: str = ""

    def name(self) -> str:
        """Kernel name: optional prefix plus the native name of the schema."""
        return self.prefix + native.name(self.func)

    def decl(self, name: Optional[str] = None) -> str:
        """C++ declaration, with argument defaults included."""
        if name is None:
            name = self.name()
        ret = native.returns_type(self.func.returns, symint=self.symint).cpp_type()
        params = ", ".join(binding.decl() for binding in self.arguments())
        return f"{ret} {name}({params})"

    def defn(self, name: Optional[str] = None) -> str:
        """C++ definition signature (no argument defaults)."""
        if name is None:
            name = self.name()
        ret = native.returns_type(self.func.returns, symint=self.symint).cpp_type()
        params = ", ".join(binding.defn() for binding in self.arguments())
        return f"{ret} {name}({params})"

    def ptr_type(self) -> str:
        """C++ function-pointer type for this kernel."""
        # don't include defaults in type signature!
        ret = native.returns_type(self.func.returns, symint=self.symint).cpp_type()
        params = ", ".join(binding.defn() for binding in self.arguments())
        return f"{ret} (*)({params})"

    def arguments(self) -> List[Binding]:
        """Bindings for this schema's arguments under the native convention."""
        return native.arguments(self.func, symint=self.symint)

    def returns_type(self) -> CType:
        """C++ return type under the native convention."""
        return native.returns_type(self.func.returns, symint=self.symint)

    def dispatcher_exprs(self) -> List[Expr]:
        """Expressions adapting these arguments to the dispatcher API."""
        return translate.translate(
            self.arguments(), dispatcher.arguments(self.func), method=False
        )
|
285 |
+
|
286 |
+
|
287 |
+
@dataclass(frozen=True)
class ViewInverseSignature:
    """Signature of the static "reverse" helper generated for a view group."""

    g: NativeFunctionsViewGroup

    def name(self) -> str:
        """Un-namespaced name of the reverse function for this view."""
        return functionalization.reverse_name(self.g.view, include_namespace=False)

    def decl(self) -> str:
        """Render the static member declaration for the reverse helper."""
        ret = functionalization.returns_type(self.g.view.func)
        params = ", ".join(
            binding.decl()
            for binding in functionalization.inner_arguments(
                self.g.view.func, is_reverse=True
            )
        )
        return f"static {ret.cpp_type()} {self.name()}({params});"
|
303 |
+
|
304 |
+
|
305 |
+
@dataclass(frozen=True)
class FunctionalizationLambda:
    """Renders the C++ lambda that the functionalization pass generates for a
    view op group, in either the forward or the reverse direction."""

    g: NativeFunctionsViewGroup

    # are we generating the forward lambda or the reverse lambda?
    is_reverse: bool

    def captures(self) -> List[Expr]:
        """Expressions for the lambda's capture list, translated out of the
        enclosing dispatcher-kernel context."""
        # The lambda lives inside of a kernel following the dispatcher API, so its outer context is the dispatcher arguments
        # We also need to read the "reapply views" TLS at the time that the functionalization kernel was executed,
        # and plumb it into the lambda.
        outer_ctx = dispatcher.arguments(self.g.view.func) + [
            functionalization.reapply_views_binding,
            functionalization.inverse_return_mode_binding,
        ]
        capture_bindings = functionalization.capture_arguments(
            self.g.view.func, is_reverse=self.is_reverse
        )
        # allow_expensive_conversions is set because we want to convert
        # some reference types (IntArrayRef) to value types (vector<int64_t>).
        capture_exprs = translate.translate(
            outer_ctx, capture_bindings, method=False, allow_expensive_conversions=True
        )
        return capture_exprs

    def decl(self) -> str:
        """Render the lambda header: capture list, parameters, return type."""
        return_type = functionalization.returns_type(self.g.view.func)
        capture_str = ", ".join(
            f"{val.type.name} = {val.expr}" for val in self.captures()
        )
        decls = [
            a.decl()
            for a in functionalization.outer_arguments(is_reverse=self.is_reverse)
        ]
        return f"[{capture_str}]({', '.join(decls)}) -> {return_type.cpp_type()}"

    def inner_call(self, *, reapply_views: Optional[bool] = None) -> str:
        """Render the lambda body's call into the underlying kernel.

        In the forward direction, when inner_call_index reports an index the
        call result is subscripted with it.
        """
        inner_call_name = functionalization.name(
            self.g,
            is_reverse=self.is_reverse,
            include_namespace=True,
            reapply_views=reapply_views,
        )

        # Translation context: the lambda's own parameters plus its captures.
        arg_ctx = functionalization.outer_arguments(is_reverse=self.is_reverse)
        capture_ctx = functionalization.capture_arguments(
            self.g.view.func, is_reverse=self.is_reverse
        )
        full_ctx = arg_ctx + capture_ctx

        # The view_copy counterpart must exist: its schema drives the call.
        assert self.g.view_copy is not None
        call_bindings = functionalization.inner_arguments(
            self.g.view_copy.func, is_reverse=self.is_reverse
        )
        maybe_index = functionalization.inner_call_index(self.g.view_copy.func)
        call_exprs = [
            e.expr for e in translate.translate(full_ctx, call_bindings, method=False)
        ]
        if not self.is_reverse and maybe_index is not None:
            return f'{inner_call_name}({", ".join(call_exprs)})[{maybe_index.name}];'
        else:
            return f'{inner_call_name}({", ".join(call_exprs)});'

    @staticmethod
    def from_func(
        g: NativeFunctionsViewGroup, *, is_reverse: bool
    ) -> "FunctionalizationLambda":
        """Alternate constructor mirroring the other signatures' helpers."""
        return FunctionalizationLambda(g, is_reverse)
|
373 |
+
|
374 |
+
|
375 |
+
@dataclass(frozen=True)
class StructuredImplSignature:
    """Signature of a structured kernel's TORCH_IMPL_FUNC implementation."""

    g: NativeFunctionsGroup
    name: str

    def defn(self, name: Optional[str] = None) -> str:
        """Render the TORCH_IMPL_FUNC(...)(...) definition header.

        NOTE(review): the `name` parameter is accepted but never used — the
        macro always receives `self.name`. Presumably kept for interface
        parity with the other signature classes; confirm before relying on it.
        """
        args_str = ", ".join(a.defn() for a in self.arguments())
        return f"TORCH_IMPL_FUNC({self.name})({args_str})"

    def arguments(self) -> List[Binding]:
        """Bindings under the structured-kernel implementation convention."""
        return structured.impl_arguments(self.g)
|
386 |
+
|
387 |
+
|
388 |
+
# Helper functions
|
389 |
+
|
390 |
+
|
391 |
+
def kernel_signature(
    f: NativeFunction, backend_index: BackendIndex, *, prefix: str = ""
) -> Union["NativeSignature", "DispatcherSignature"]:
    """Pick the signature convention a backend kernel for `f` must follow."""
    # Note [External Backends Follow Dispatcher API]
    # Kernel signatures for in-tree backends follow the "native" API,
    # while kernels for out-of-tree backends follow the dispatcher API.
    # See the comments in `native.py` for details, but historically there have been
    # some small differences in schema convention between them and the Dispatcher API.
    # Any differences that require translating between the two will results in a runtime cost,
    # so we'd like to keep the differences as small as possible.
    # With external backends, we'd like to enforce that they write their kernels with schemas
    # that match the Dispatcher API directly, if they can.
    kernel_meta = backend_index.get_kernel(f)
    symint = kernel_meta is not None and kernel_meta.supports_symint()
    if symint:
        assert (
            f.func.has_symint()
        ), f"attempted to define symint kernel for {backend_index.dispatch_key} without SymInt in schema"
    if backend_index.external:
        return DispatcherSignature.from_schema(f.func, prefix=prefix, symint=symint)
    return NativeSignature(f.func, prefix=prefix, symint=symint)
|
413 |
+
|
414 |
+
|
415 |
+
# Functions only, no types
|
416 |
+
from torchgen.api import (
|
417 |
+
cpp,
|
418 |
+
dispatcher,
|
419 |
+
functionalization,
|
420 |
+
native,
|
421 |
+
structured,
|
422 |
+
translate,
|
423 |
+
)
|
venv/lib/python3.10/site-packages/torchgen/api/types/types.py
ADDED
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Where should I add a new type? `types_base.py` vs `types.py`
|
3 |
+
|
4 |
+
This file defines data model classes for torchgen typing system, as well as some base types such as int32_t.
|
5 |
+
|
6 |
+
`types.py` defines ATen Tensor type and some c10 types, along with signatures that use these types.
|
7 |
+
|
8 |
+
The difference between these two files, is `types_base.py` should be implementation-agnostic, meaning it shouldn't
|
9 |
+
contain any type definition that is tight to a specific C++ library (e.g., ATen), so that it can be easily reused
|
10 |
+
if we want to generate code for another C++ library.
|
11 |
+
|
12 |
+
Add new types to `types.py` if these types are ATen/c10 related.
|
13 |
+
Add new types to `types_base.py` if they are basic and not attached to ATen/c10.
|
14 |
+
"""
|
15 |
+
from dataclasses import dataclass
|
16 |
+
from typing import Dict
|
17 |
+
|
18 |
+
from torchgen.model import BaseTy, ScalarType
|
19 |
+
|
20 |
+
from .types_base import (
|
21 |
+
BaseCppType,
|
22 |
+
BaseCType,
|
23 |
+
boolT,
|
24 |
+
byteT,
|
25 |
+
charT,
|
26 |
+
CType,
|
27 |
+
doubleT,
|
28 |
+
floatT,
|
29 |
+
int32T,
|
30 |
+
longT,
|
31 |
+
shortT,
|
32 |
+
)
|
33 |
+
|
34 |
+
|
35 |
+
# Rendered C++ type strings that represent lists of (possibly optional)
# tensors; consumers can compare a Binding's rendered type against these.
TENSOR_LIST_LIKE_CTYPES = [
    "at::TensorList",
    "const c10::List<c10::optional<at::Tensor>> &",
    "const at::ITensorListRef &",
]
|
40 |
+
|
41 |
+
|
42 |
+
# Singleton BaseCppType instances for the ATen/c10 types used by codegen.
halfT = BaseCppType("at", "Half")
complexHalfT = BaseCppType(
    "c10", "complex<c10::Half>"
)  # stuffing template param here is an abuse
complexFloatT = BaseCppType("c10", "complex<float>")
complexDoubleT = BaseCppType("c10", "complex<double>")
bfloat16T = BaseCppType("at", "BFloat16")
float8_e5m2T = BaseCppType("at", "Float8_e5m2")
float8_e5m2fnuzT = BaseCppType("at", "Float8_e5m2fnuz")
float8_e4m3fnT = BaseCppType("at", "Float8_e4m3fn")
float8_e4m3fnuzT = BaseCppType("at", "Float8_e4m3fnuz")
stringT = BaseCppType("c10", "string_view")
generatorT = BaseCppType("at", "Generator")
scalarTypeT = BaseCppType("at", "ScalarType")
tensorT = BaseCppType("at", "Tensor")
optionalTensorRefT = BaseCppType("at", "OptionalTensorRef")
tensorListT = BaseCppType("at", "TensorList")
iTensorListRefT = BaseCppType("at", "ITensorListRef")
iOptTensorListRefT = BaseCppType("at", "IOptTensorListRef")
dimnameT = BaseCppType("at", "Dimname")
dimnameListT = BaseCppType("at", "DimnameList")
dimVectorT = BaseCppType("at", "DimVector")
layoutT = BaseCppType("at", "Layout")
deviceT = BaseCppType("at", "Device")
deviceIndexT = BaseCppType("at", "DeviceIndex")
scalarT = BaseCppType("at", "Scalar")
optionalScalarRefT = BaseCppType("at", "OptionalScalarRef")
memoryFormatT = BaseCppType("at", "MemoryFormat")
qschemeT = BaseCppType("at", "QScheme")
storageT = BaseCppType("at", "Storage")
streamT = BaseCppType("at", "Stream")
intArrayRefT = BaseCppType("at", "IntArrayRef")
optionalIntArrayRefT = BaseCppType("at", "OptionalIntArrayRef")
optionalSymIntArrayRefT = BaseCppType("at", "OptionalSymIntArrayRef")
tensorOptionsT = BaseCppType("at", "TensorOptions")
typeAndSizeT = BaseCppType("torch::autograd::generated", "TypeAndSize")
tensorGeometryT = BaseCppType("at", "TensorGeometry")
SymIntT = BaseCppType("c10", "SymInt")
symIntArrayRefT = BaseCppType("c10", "SymIntArrayRef")

# Types representing template parameters. Technically, we probably shouldn't
# represent them this way in codegen, but it was pretty convenient.
scalar_t = BaseCppType("", "scalar_t")
opmath_t = BaseCppType("", "opmath_t")
|
86 |
+
|
87 |
+
# ScalarType enum values -> the C++ type used to hold one element.
ScalarTypeToCppMapping: Dict[ScalarType, BaseCppType] = {
    ScalarType.Byte: byteT,
    ScalarType.Char: charT,
    ScalarType.Short: shortT,
    ScalarType.Int: int32T,
    ScalarType.Long: longT,
    ScalarType.Half: halfT,
    ScalarType.Float: floatT,
    ScalarType.Double: doubleT,
    ScalarType.ComplexHalf: complexHalfT,
    ScalarType.ComplexFloat: complexFloatT,
    ScalarType.ComplexDouble: complexDoubleT,
    ScalarType.Bool: boolT,
    ScalarType.Float8_e5m2: float8_e5m2T,
    ScalarType.Float8_e5m2fnuz: float8_e5m2fnuzT,
    ScalarType.Float8_e4m3fn: float8_e4m3fnT,
    ScalarType.Float8_e4m3fnuz: float8_e4m3fnuzT,
}

# JIT schema base types -> their C++ representation.
BaseTypeToCppMapping: Dict[BaseTy, BaseCppType] = {
    BaseTy.int: longT,
    BaseTy.float: doubleT,
    BaseTy.bool: boolT,
    BaseTy.str: stringT,
    BaseTy.Generator: generatorT,
    BaseTy.ScalarType: scalarTypeT,
    BaseTy.Tensor: tensorT,
    BaseTy.Dimname: dimnameT,
    BaseTy.DimVector: dimVectorT,
    BaseTy.Layout: layoutT,
    BaseTy.Device: deviceT,
    BaseTy.DeviceIndex: deviceIndexT,
    BaseTy.Scalar: scalarT,
    BaseTy.MemoryFormat: memoryFormatT,
    BaseTy.QScheme: qschemeT,
    BaseTy.Storage: storageT,
    BaseTy.Stream: streamT,
    BaseTy.SymInt: SymIntT,
}
|
126 |
+
|
127 |
+
# CTypes encode C++ type structure as needed for translation.
|
128 |
+
|
129 |
+
|
130 |
+
@dataclass(frozen=True)
class OptionalCType(CType):
    """CType wrapping an element type in c10::optional."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` is deliberately not forwarded to the element.
        inner = self.elem.cpp_type()
        return f"c10::optional<{inner}>"

    def cpp_type_registration_declarations(self) -> str:
        inner = self.elem.cpp_type_registration_declarations()
        return f"c10::optional<{inner}>"

    def remove_const_ref(self) -> "CType":
        return OptionalCType(self.elem.remove_const_ref())
|
143 |
+
|
144 |
+
|
145 |
+
@dataclass(frozen=True)
class ListCType(CType):
    """CType wrapping an element type in c10::List."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` is deliberately not forwarded to the element.
        inner = self.elem.cpp_type()
        return f"c10::List<{inner}>"

    def cpp_type_registration_declarations(self) -> str:
        inner = self.elem.cpp_type_registration_declarations()
        return f"c10::List<{inner}>"

    def remove_const_ref(self) -> "CType":
        return ListCType(self.elem.remove_const_ref())
|
158 |
+
|
159 |
+
|
160 |
+
@dataclass(frozen=True)
class ArrayRefCType(CType):
    """CType wrapping an element type in at::ArrayRef."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` is deliberately not forwarded to the element.
        inner = self.elem.cpp_type()
        return f"at::ArrayRef<{inner}>"

    def cpp_type_registration_declarations(self) -> str:
        # Note: no at:: prefix in the RegistrationDeclarations rendering.
        inner = self.elem.cpp_type_registration_declarations()
        return f"ArrayRef<{inner}>"

    def remove_const_ref(self) -> "CType":
        return ArrayRefCType(self.elem.remove_const_ref())
|
173 |
+
|
174 |
+
|
175 |
+
@dataclass(frozen=True)
class VectorizedCType(CType):
    """CType for at::vec::Vectorized<elem>.

    This template is explicitly specialized, so the only valid elems are
    those we have specializations for (e.g., float, double, ...); scalar_t
    is also a common argument when we codegen in a templated context.
    """

    elem: BaseCType

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        return f"at::vec::Vectorized<{self.elem.cpp_type()}>"

    def cpp_type_registration_declarations(self) -> str:
        # Not supported for vectorized types.
        raise NotImplementedError

    def remove_const_ref(self) -> "CType":
        return self
|
venv/lib/python3.10/site-packages/torchgen/api/types/types_base.py
ADDED
@@ -0,0 +1,270 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Where should I add a new type? `types_base.py` vs `types.py`
|
3 |
+
|
4 |
+
This file defines data model classes for torchgen typing system, as well as some base types such as int32_t.
|
5 |
+
|
6 |
+
`types.py` defines ATen Tensor type and some c10 types, along with signatures that use these types.
|
7 |
+
|
8 |
+
The difference between these two files, is `types_base.py` should be implementation-agnostic, meaning it shouldn't
|
9 |
+
contain any type definition that is tight to a specific C++ library (e.g., ATen), so that it can be easily reused
|
10 |
+
if we want to generate code for another C++ library.
|
11 |
+
|
12 |
+
Add new types to `types.py` if these types are ATen/c10 related.
|
13 |
+
Add new types to `types_base.py` if they are basic and not attached to ATen/c10.
|
14 |
+
"""
|
15 |
+
from abc import ABC, abstractmethod
|
16 |
+
from dataclasses import dataclass
|
17 |
+
from enum import auto, Enum
|
18 |
+
from typing import List, Optional, Union
|
19 |
+
|
20 |
+
from torchgen.model import Argument, SelfArgument, TensorOptionsArguments
|
21 |
+
|
22 |
+
# An ArgName is just the str name of the argument in schema;
|
23 |
+
# but in some special circumstances, we may add a little extra
|
24 |
+
# context. The Enum SpecialArgName covers all of these cases;
|
25 |
+
# grep for their construction sites to see when they can occur.
|
26 |
+
|
27 |
+
|
28 |
+
class SpecialArgName(Enum):
    # See the construction sites (grep for this name) for when this occurs.
    possibly_redundant_memory_format = auto()
|
30 |
+
|
31 |
+
|
32 |
+
# An ArgName is either the plain schema name of an argument or a SpecialArgName marker.
ArgName = Union[str, SpecialArgName]
|
33 |
+
|
34 |
+
|
35 |
+
# This class shouldn't be created directly; instead, use/create one of the singletons below.
@dataclass(frozen=True)
class BaseCppType:
    """A (namespace, name) pair identifying a concrete C++ type."""

    ns: Optional[str]
    name: str

    def __str__(self) -> str:
        # A missing or empty namespace renders as the bare type name.
        if self.ns:
            return f"{self.ns}::{self.name}"
        return self.name
|
45 |
+
|
46 |
+
|
47 |
+
# The set of all non-templated, valid, fully-qualified names of C++ types that are used in the codegen.
# Templated types get their own dataclass, mainly to make namespace parsing easier.
byteT = BaseCppType("", "uint8_t")
charT = BaseCppType("", "int8_t")
shortT = BaseCppType("", "int16_t")
# It would be more symmetric for this to be called intT, but it easy to mix
# this up with JIT int (which is int64_t in C++), so we intentionally don't
# define intT to make it obvious when you've stuffed it up
int32T = BaseCppType("", "int32_t")
longT = BaseCppType("", "int64_t")
doubleT = BaseCppType("", "double")
floatT = BaseCppType("", "float")
boolT = BaseCppType("", "bool")
voidT = BaseCppType("", "void")
|
61 |
+
|
62 |
+
|
63 |
+
class CType(ABC):
    """Abstract base of the C++ type structure used for translation."""

    @abstractmethod
    def cpp_type(self, *, strip_ref: bool = False) -> str:
        """Render the C++ type; strip_ref drops an outer reference layer."""
        raise NotImplementedError

    @abstractmethod
    def cpp_type_registration_declarations(self) -> str:
        """Render for RegistrationDeclarations.yaml (no at:: namespaces)."""
        raise NotImplementedError

    @abstractmethod
    def remove_const_ref(self) -> "CType":
        """Return this type with outer (const-)reference layers removed."""
        return self
|
75 |
+
|
76 |
+
|
77 |
+
@dataclass(frozen=True)
class BaseCType(CType):
    """Leaf CType wrapping a single BaseCppType."""

    type: BaseCppType

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        return str(self.type)

    def cpp_type_registration_declarations(self) -> str:
        # For BC reasons, we don't want to introduce at:: namespaces to
        # RegistrationDeclarations.yaml. TODO: kill this when it is removed!
        rendered = str(self.type)
        return rendered.replace("at::", "")

    def remove_const_ref(self) -> "CType":
        # A leaf type carries no reference layers.
        return self
|
91 |
+
|
92 |
+
|
93 |
+
@dataclass(frozen=True)
class ConstRefCType(CType):
    """`const T &` wrapper around an element CType."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        if strip_ref:
            # Caller wants only the referent type.
            return self.elem.cpp_type(strip_ref=strip_ref)
        return f"const {self.elem.cpp_type()} &"

    def cpp_type_registration_declarations(self) -> str:
        inner = self.elem.cpp_type_registration_declarations()
        return f"const {inner} &"

    def remove_const_ref(self) -> "CType":
        # Stripping the reference recurses into the element as well.
        return self.elem.remove_const_ref()
|
107 |
+
|
108 |
+
|
109 |
+
@dataclass(frozen=True)
class VectorCType(CType):
    """::std::vector<elem> wrapper around an element CType."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` is deliberately not forwarded to the element.
        inner = self.elem.cpp_type()
        return f"::std::vector<{inner}>"

    def cpp_type_registration_declarations(self) -> str:
        inner = self.elem.cpp_type_registration_declarations()
        return f"::std::vector<{inner}>"

    def remove_const_ref(self) -> "CType":
        return VectorCType(self.elem.remove_const_ref())
|
122 |
+
|
123 |
+
|
124 |
+
@dataclass(frozen=True)
class ArrayCType(CType):
    """::std::array<elem, size> wrapper around an element CType."""

    elem: "CType"
    size: int

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` is deliberately not forwarded to the element.
        inner = self.elem.cpp_type()
        return f"::std::array<{inner},{self.size}>"

    def cpp_type_registration_declarations(self) -> str:
        inner = self.elem.cpp_type_registration_declarations()
        return f"::std::array<{inner},{self.size}>"

    def remove_const_ref(self) -> "CType":
        return ArrayCType(self.elem.remove_const_ref(), self.size)
|
138 |
+
|
139 |
+
|
140 |
+
@dataclass(frozen=True)
class TupleCType(CType):
    """::std::tuple of element CTypes."""

    elems: List["CType"]

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` is deliberately not forwarded to the elements.
        inner = ",".join(e.cpp_type() for e in self.elems)
        return f"::std::tuple<{inner}>"

    def cpp_type_registration_declarations(self) -> str:
        inner = ",".join(
            e.cpp_type_registration_declarations() for e in self.elems
        )
        return f"::std::tuple<{inner}>"

    def remove_const_ref(self) -> "CType":
        return TupleCType([e.remove_const_ref() for e in self.elems])
|
153 |
+
|
154 |
+
|
155 |
+
@dataclass(frozen=True)
class MutRefCType(CType):
    """`T &` (mutable reference) wrapper around an element CType."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        if strip_ref:
            # Caller wants only the referent type.
            return self.elem.cpp_type(strip_ref=strip_ref)
        return f"{self.elem.cpp_type()} &"

    def cpp_type_registration_declarations(self) -> str:
        inner = self.elem.cpp_type_registration_declarations()
        return f"{inner} &"

    def remove_const_ref(self) -> "CType":
        # Stripping the reference recurses into the element as well.
        return self.elem.remove_const_ref()
|
169 |
+
|
170 |
+
|
171 |
+
# A NamedCType is short for Named C++ semantic type. A NamedCType represents a C++ type, plus
|
172 |
+
# semantic information about what it represents. For example, consider the
|
173 |
+
# argument "bool pin_memory"; its normal C++ type is "bool", but its C++
|
174 |
+
# semantic type also keeps track that this represents a "pin_memory"; you can't
|
175 |
+
# just use a random other boolean in a context where you need a "pin_memory"!
|
176 |
+
#
|
177 |
+
|
178 |
+
|
179 |
+
@dataclass(frozen=True)
class NamedCType:
    """A C++ type plus the semantic name of the value it represents."""

    name: ArgName
    type: CType

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        """Render the underlying C++ type."""
        return self.type.cpp_type(strip_ref=strip_ref)

    # For BC reasons, we don't want to introduce at:: namespaces to RegistrationDeclarations.yaml
    # TODO: Kill this when we eventually remove it!
    def cpp_type_registration_declarations(self) -> str:
        return self.type.cpp_type_registration_declarations()

    def remove_const_ref(self) -> "NamedCType":
        """Same name, with reference layers stripped from the type."""
        return NamedCType(self.name, self.type.remove_const_ref())

    def with_name(self, name: str) -> "NamedCType":
        """Same type, rebound under a different semantic name."""
        return NamedCType(name, self.type)
|
197 |
+
|
198 |
+
|
199 |
+
# A binding represents any C++ binding site for a formal parameter.
|
200 |
+
# We don't distinguish between binding sites for different APIs;
|
201 |
+
# instead, all of the important distinctions are encoded in CType,
|
202 |
+
# which you can use to figure out if a given Binding is appropriate
|
203 |
+
# for use in another context. (See torchgen.api.translate)
|
204 |
+
|
205 |
+
|
206 |
+
@dataclass(frozen=True)
class Binding:
    """A C++ binding site for one formal parameter.

    Binding sites for different APIs are not distinguished here; the
    important distinctions are encoded in the NamedCType (see
    torchgen.api.translate).
    """

    name: str
    nctype: NamedCType
    argument: Union[Argument, TensorOptionsArguments, SelfArgument]
    # TODO: maybe don't represent default here
    default: Optional[str] = None

    def rename(self, name: str) -> "Binding":
        """Copy of this binding under a different name."""
        return Binding(
            name=name,
            nctype=self.nctype,
            argument=self.argument,
            default=self.default,
        )

    @property
    def type(self) -> str:
        """Rendered C++ type of this binding."""
        return self.nctype.cpp_type()

    def no_default(self) -> "Binding":
        """Copy of this binding with its default value stripped."""
        return Binding(
            name=self.name,
            nctype=self.nctype,
            default=None,
            argument=self.argument,
        )

    def decl(self, *, func_ptr_cast: bool = False) -> str:
        """Declaration text, including a default value when present."""
        if func_ptr_cast:
            # casting only needs to know the type
            return f"{self.type}"
        suffix = "" if self.default is None else f"={self.default}"
        return f"{self.type} {self.name}{suffix}"

    # For BC reasons, we don't want to introduce at:: namespaces to RegistrationDeclarations.yaml
    # TODO: Kill this when we eventually remove it!
    def decl_registration_declarations(self) -> str:
        type_s = self.nctype.cpp_type_registration_declarations()
        suffix = "" if self.default is None else f"={self.default}"
        return f"{type_s} {self.name}{suffix}"

    def defn(self) -> str:
        """Definition text: type and name, never a default."""
        return f"{self.type} {self.name}"

    def with_name(self, name: str) -> "Binding":
        """Copy of this binding under a different name."""
        return Binding(
            name=name, nctype=self.nctype, argument=self.argument, default=self.default
        )
|
261 |
+
|
262 |
+
|
263 |
+
# An Expr is a C++ expression. It has a C++ string representing its syntax,
|
264 |
+
# as well as a CType saying what it provides.
|
265 |
+
|
266 |
+
|
267 |
+
@dataclass(frozen=True)
class Expr:
    """A C++ expression: its syntax string plus the NamedCType it provides."""

    expr: str
    type: NamedCType
|
venv/lib/python3.10/site-packages/torchgen/api/ufunc.py
ADDED
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from dataclasses import dataclass
|
2 |
+
from typing import List, Optional
|
3 |
+
|
4 |
+
import torchgen.api.types as api_types
|
5 |
+
|
6 |
+
from torchgen.api import cpp, structured
|
7 |
+
from torchgen.api.types import (
|
8 |
+
ArgName,
|
9 |
+
BaseCppType,
|
10 |
+
BaseCType,
|
11 |
+
Binding,
|
12 |
+
ConstRefCType,
|
13 |
+
CType,
|
14 |
+
NamedCType,
|
15 |
+
scalarT,
|
16 |
+
)
|
17 |
+
from torchgen.model import (
|
18 |
+
Argument,
|
19 |
+
BaseTy,
|
20 |
+
BaseType,
|
21 |
+
DispatchKey,
|
22 |
+
FunctionSchema,
|
23 |
+
NativeFunctionsGroup,
|
24 |
+
Type,
|
25 |
+
)
|
26 |
+
|
27 |
+
|
28 |
+
def schema_kernel_name(func: FunctionSchema, dispatch_key: DispatchKey) -> str:
    """Name of the generated ufunc kernel for an out-variant schema."""
    assert func.is_out_fn(), "ufunc.kernel_name should only be invoked on out schemas"
    return "ufunc_{}_{}".format(func.name.name, dispatch_key)
|
31 |
+
|
32 |
+
|
33 |
+
def kernel_name(g: NativeFunctionsGroup, dispatch_key: DispatchKey) -> str:
    """Ufunc kernel name for a functions group, keyed off its out schema."""
    return schema_kernel_name(g.out.func, dispatch_key)
|
35 |
+
|
36 |
+
|
37 |
+
# Tensors are omitted (as they are stored in TensorIterator), everything else is
|
38 |
+
# passed along (technically, we can pass tensors along too, it just wastes
|
39 |
+
# argument registers)
|
40 |
+
#
|
41 |
+
# NB: used for CPU only
|
42 |
+
def dispatchstub_type(t: Type, *, binds: ArgName) -> Optional[NamedCType]:
    """CPU dispatch-stub type for `t`; None means the argument is omitted.

    Tensors are omitted (they are stored in TensorIterator); everything else
    is passed along.
    """
    # Dispatch stubs are always plain ints
    vt = cpp.valuetype_type(t, binds=binds, symint=False)
    if vt is not None:
        return vt
    if t == BaseType(BaseTy.Scalar):
        return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
    if t == BaseType(BaseTy.Tensor):
        return None
    raise AssertionError(f"unrecognized type {repr(t)}")
|
54 |
+
|
55 |
+
|
56 |
+
def opmath_type(scalar_t: BaseCppType) -> BaseCppType:
    """Map a compute scalar type to its opmath (higher precision) counterpart.

    Only the scalar_t template-parameter type is supported today.
    """
    if scalar_t == api_types.scalar_t:
        return api_types.opmath_t
    raise NotImplementedError
|
60 |
+
|
61 |
+
|
62 |
+
# NB: Tensors in constructor are stored in opmath_t, not scalar_t
|
63 |
+
# because Tensor in constructor = its a scalar tensor partially applied =
|
64 |
+
# it can be higher precision and we want to compute in that higher precision
|
65 |
+
#
|
66 |
+
# NB: CUDA only
|
67 |
+
def ufunctor_ctor_type(t: Type, *, binds: ArgName, scalar_t: BaseCppType) -> NamedCType:
    """Type of a ufunctor constructor parameter (CUDA only).

    Tensors in the constructor are stored as opmath_t, not scalar_t: a Tensor
    here is a scalar tensor partially applied, which can be higher precision,
    and we want to compute in that higher precision.
    """
    vt = cpp.valuetype_type(t, binds=binds, symint=False)
    if vt is not None:
        return vt
    # Scalar and (scalar) Tensor captures are both held at opmath precision.
    if t == BaseType(BaseTy.Scalar) or t == BaseType(BaseTy.Tensor):
        return NamedCType(binds, BaseCType(opmath_type(scalar_t)))
    raise AssertionError(f"unrecognized type {repr(t)}")
|
78 |
+
|
79 |
+
|
80 |
+
# Only Tensors ever get passed directly to operator()
|
81 |
+
#
|
82 |
+
# NB: CUDA only
|
83 |
+
# (Actually, this works for CPU too)
|
84 |
+
def ufunctor_apply_type(
    t: Type, *, binds: ArgName, scalar_t: BaseCppType
) -> NamedCType:
    """C++ type for a device-side operator() parameter.

    Only Tensors are ever passed directly to operator(); anything else is
    a codegen invariant violation.  (Used for CUDA; works for CPU too.)
    """
    if t != BaseType(BaseTy.Tensor):
        raise AssertionError(f"unrecognized type {repr(t)}")
    return NamedCType(binds, BaseCType(scalar_t))
|
91 |
+
|
92 |
+
|
93 |
+
# The actual ufunc template function the user writes. Everything here
|
94 |
+
# is done in the computation type. compute_t is opmath_t in CUDA and scalar_t
|
95 |
+
# in CPU
|
96 |
+
def ufunc_type(t: Type, *, binds: ArgName, compute_t: CType) -> NamedCType:
    """C++ type for a parameter of the user-written ufunc template.

    Everything is done in the computation type: compute_t is opmath_t on
    CUDA and scalar_t on CPU.
    """
    value_type = cpp.valuetype_type(t, binds=binds, symint=False)
    if value_type is not None:
        return value_type

    # Scalars and Tensors alike are presented to the ufunc at compute_t
    if t in (BaseType(BaseTy.Scalar), BaseType(BaseTy.Tensor)):
        return NamedCType(binds, compute_t)
    raise AssertionError(f"unrecognized type {repr(t)}")
|
107 |
+
|
108 |
+
|
109 |
+
def ufunctor_ctor_argument(a: Argument, scalar_t: BaseCppType) -> Binding:
    """Bind argument ``a`` as a host-side ufunctor constructor parameter."""
    nctype = ufunctor_ctor_type(a.type, binds=a.name, scalar_t=scalar_t)
    return Binding(nctype=nctype, name=a.name, default=None, argument=a)
|
116 |
+
|
117 |
+
|
118 |
+
def ufunctor_apply_argument(a: Argument, scalar_t: BaseCppType) -> Binding:
    """Bind argument ``a`` as a device-side operator() parameter."""
    nctype = ufunctor_apply_type(a.type, binds=a.name, scalar_t=scalar_t)
    return Binding(nctype=nctype, name=a.name, default=None, argument=a)
|
125 |
+
|
126 |
+
|
127 |
+
def ufunc_argument(a: Argument, compute_t: CType) -> Binding:
    """Bind argument ``a`` as a parameter of the user-written ufunc template."""
    nctype = ufunc_type(a.type, binds=a.name, compute_t=compute_t)
    return Binding(nctype=nctype, name=a.name, default=None, argument=a)
|
134 |
+
|
135 |
+
|
136 |
+
@dataclass(frozen=True)
class UfunctorBindings:
    # Bindings for a CUDA ufunctor, split by where each argument arrives:
    # `ctor` holds the host-side constructor parameters (captured state),
    # `apply` holds the device-side operator() parameters.
    ctor: List[Binding]
    apply: List[Binding]
|
140 |
+
|
141 |
+
|
142 |
+
# ufunctors are a CUDA-only concept representing functors that take some of
|
143 |
+
# their arguments on a host-side constructor, and the rest in the device-side
|
144 |
+
# apply. E.g.,
|
145 |
+
#
|
146 |
+
# template <typename scalar_t>
|
147 |
+
# struct CUDAFunctorOnSelf_add {
|
148 |
+
# using opmath_t = at::opmath_type<scalar_t>;
|
149 |
+
# opmath_t other_;
|
150 |
+
# opmath_t alpha_;
|
151 |
+
# CUDAFunctorOnSelf_add(opmath_t other, opmath_t alpha) : other_(other), alpha_(alpha) {}
|
152 |
+
# __device__ scalar_t operator()(scalar_t self) {
|
153 |
+
# return ufunc::add(static_cast<opmath_t>(self), other_, alpha_);
|
154 |
+
# }
|
155 |
+
# };
|
156 |
+
#
|
157 |
+
# The ctor refers to the constructor CUDAFunctorOnSelf_add, while apply refers
|
158 |
+
# to the operator() definition
|
159 |
+
def ufunctor_arguments(
    g: NativeFunctionsGroup, *, scalar_tensor_idx: Optional[int], scalar_t: BaseCppType
) -> UfunctorBindings:
    """Split the functional variant's arguments into ufunctor ctor/apply bindings.

    `scalar_tensor_idx`, if not None, is the index (counted among tensor-like
    arguments only) of the tensor treated as a partially-applied scalar: that
    tensor is captured in the constructor instead of being passed to
    operator().  All non-tensor arguments always go to the constructor.
    """
    ctor = []
    apply = []
    for a in g.functional.func.arguments.flat_non_out:
        if a.type.is_tensor_like():
            if scalar_tensor_idx == 0:
                # this is the designated scalar tensor: put it in the ctor anyway
                ctor.append(ufunctor_ctor_argument(a, scalar_t=scalar_t))
                scalar_tensor_idx = None
            else:
                # count down tensor arguments until we hit the scalar one
                if scalar_tensor_idx is not None:
                    scalar_tensor_idx -= 1
                apply.append(ufunctor_apply_argument(a, scalar_t=scalar_t))
        else:
            ctor.append(ufunctor_ctor_argument(a, scalar_t=scalar_t))
    # if an index was supplied, the loop must have consumed it
    assert scalar_tensor_idx is None
    return UfunctorBindings(ctor=ctor, apply=apply)
|
178 |
+
|
179 |
+
|
180 |
+
# ufuncs are the inner loop template functions that you wrote in ufunc/add.h
|
181 |
+
# which do the actual computation in question. E.g.,
|
182 |
+
#
|
183 |
+
# template <typename T>
|
184 |
+
# C10_HOST_DEVICE T add(T self, T other, T alpha) __ubsan_ignore_undefined__ {
|
185 |
+
# return self + alpha * other;
|
186 |
+
# }
|
187 |
+
#
|
188 |
+
# In this file, we refer to T as compute_t which is bound by caller
|
189 |
+
def ufunc_arguments(g: NativeFunctionsGroup, *, compute_t: CType) -> List[Binding]:
    """Bindings for the inner-loop ufunc template, all at compute_t precision."""
    bindings: List[Binding] = []
    for a in g.functional.func.arguments.flat_non_out:
        bindings.append(ufunc_argument(a, compute_t=compute_t))
    return bindings
|
194 |
+
|
195 |
+
|
196 |
+
# Stubs are the DispatchStub trampolines that CPU kernels use to get to their
|
197 |
+
# vectorized versions. E.g.,
|
198 |
+
#
|
199 |
+
# using structured_binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha);
|
200 |
+
# DECLARE_DISPATCH(structured_binary_fn_alpha, add_stub);
|
201 |
+
def stub_arguments(g: NativeFunctionsGroup) -> List[Binding]:
    """Bindings for a DispatchStub trampoline signature.

    Stubs drop all tensor arguments (they are implicit in the
    TensorIterator argument) and keep everything else.
    """
    bindings: List[Binding] = []
    for a in g.out.func.arguments.flat_non_out:
        if a.type.is_tensor_like():
            continue
        bindings.extend(structured.argument(a))
    return bindings
|
venv/lib/python3.10/site-packages/torchgen/api/unboxing.py
ADDED
@@ -0,0 +1,248 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import List, Tuple
|
2 |
+
|
3 |
+
from torchgen.api import cpp
|
4 |
+
from torchgen.api.types import Binding, CppSignatureGroup, CType
|
5 |
+
from torchgen.model import (
|
6 |
+
Argument,
|
7 |
+
BaseTy,
|
8 |
+
BaseType,
|
9 |
+
ListType,
|
10 |
+
NativeFunction,
|
11 |
+
OptionalType,
|
12 |
+
Type,
|
13 |
+
)
|
14 |
+
|
15 |
+
# This file generates the code for unboxing wrappers, i.e., the glue logic to unbox a boxed operator and convert the
|
16 |
+
# ivalues from stack to correct arguments to the unboxed kernel, based on corresponding JIT schema. This codegen is
|
17 |
+
# an alternative way to generate unboxing wrappers similar to the existing C++ metaprogramming approach but gets the
|
18 |
+
# job done statically. These generated unboxing wrappers will be useful under the scenario where we need to register
|
19 |
+
# a fixed set of operators known at compile time and thus can save some time in runtime initialization phase.
|
20 |
+
#
|
21 |
+
# Here's an example on how the codegen works:
|
22 |
+
#
|
23 |
+
# - Function Schema (source of truth)
|
24 |
+
#
|
25 |
+
# aten::empty.names(int[] size, *, Dimname[]? names,
|
26 |
+
# ScalarType? dtype=None, Layout? layout=None,
|
27 |
+
# Device? device=None, bool? pin_memory=None,
|
28 |
+
# MemoryFormat? memory_format=None) -> Tensor
|
29 |
+
# - Argument Conversion
|
30 |
+
# Generates C++ code to convert an ivalue (from stack) to its underlying C++ type.
|
31 |
+
# - int[] size
|
32 |
+
# ```cpp
|
33 |
+
# const c10::List<c10::IValue> size_list_in = (std::move(peek(stack, 0, 7))).toList();
|
34 |
+
#
|
35 |
+
# std::vector<int64_t> size_vec;
|
36 |
+
# for (c10::IValue size_elem: size_list_in) {
|
37 |
+
# int64_t size_base = size_elem.to<int64_t>();
|
38 |
+
# size_vec.push_back(size_base);
|
39 |
+
# }
|
40 |
+
# at::ArrayRef<int64_t> size_list_out(size_vec);
|
41 |
+
# ~~~~~~~~~~~~~ <-- The converted argument from ivalues in the stack.
|
42 |
+
# Will be passed to unboxed kernel.
|
43 |
+
# ```
|
44 |
+
# - Dimname[]? names
|
45 |
+
# ```cpp
|
46 |
+
# c10::optional<c10::IValue> names_opt = (std::move(peek(stack, 1, 7))).toOptional<c10::IValue>();
|
47 |
+
# c10::optional<at::ArrayRef<at::Dimname>> names_opt_out;
|
48 |
+
# if (names_opt.has_value()) {
|
49 |
+
# ~~~~~~~~~~~ <-- Unwrapping optional shell
|
50 |
+
# const c10::IValue names_opt_in = names_opt.value();
|
51 |
+
# const c10::List<c10::IValue> names_list_in = names_opt_in.toList();
|
52 |
+
#
|
53 |
+
# std::vector<at::Dimname> names_vec;
|
54 |
+
# for (c10::IValue names_elem: names_list_in) {
|
55 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~ <-- Unrolling list, then convert elements one by one.
|
56 |
+
# at::Dimname names_base = names_elem.to<at::Dimname>();
|
57 |
+
# names_vec.push_back(names_base);
|
58 |
+
# }
|
59 |
+
# at::ArrayRef<at::Dimname> names_list_out(names_vec);
|
60 |
+
#
|
61 |
+
# names_opt_out = c10::optional<at::ArrayRef<at::Dimname>>(names_list_out);
|
62 |
+
# } else {
|
63 |
+
# names_opt_out = c10::optional<at::ArrayRef<at::Dimname>>();
|
64 |
+
# }
|
65 |
+
# ```
|
66 |
+
# - ScalarType? dtype (similarly for the rest of the arguments)
|
67 |
+
# ```cpp
|
68 |
+
# c10::optional<c10::IValue> dtype_opt = (std::move(peek(stack, 2, 7))).toOptional<c10::IValue>();
|
69 |
+
# c10::optional<at::ScalarType> dtype_opt_out;
|
70 |
+
# if (dtype_opt.has_value()) {
|
71 |
+
# const c10::IValue dtype_opt_in = dtype_opt.value();
|
72 |
+
# at::ScalarType dtype_base = dtype_opt_in.to<at::ScalarType>();
|
73 |
+
# ~~~~~~~~~~~~~~~~~~~~ <-- For base types, convert ivalue to it
|
74 |
+
# directly using ".to<T>()" API.
|
75 |
+
# dtype_opt_out = c10::optional<at::ScalarType>(dtype_base);
|
76 |
+
# } else {
|
77 |
+
# dtype_opt_out = c10::optional<at::ScalarType>();
|
78 |
+
# }
|
79 |
+
# ```
|
80 |
+
#
|
81 |
+
# - Unboxed Kernel Call
|
82 |
+
# ```cpp
|
83 |
+
# auto result_ = torch::empty(
|
84 |
+
# size_list_out,
|
85 |
+
# names_opt_out,
|
86 |
+
# options,
|
87 |
+
# memory_format_opt_out
|
88 |
+
# );
|
89 |
+
# ```
|
90 |
+
#
|
91 |
+
# - Push Result Back to Stack
|
92 |
+
# ```cpp
|
93 |
+
# drop(stack, 7);
|
94 |
+
# pack(stack, std::move(result_));
|
95 |
+
# ```
|
96 |
+
connector = "\n\t"
|
97 |
+
|
98 |
+
|
99 |
+
# Return unboxing function name for a NativeFunction
|
100 |
+
def name(f: NativeFunction) -> str:
    # Use the unambiguous (overload-qualified) name so generated unboxing
    # wrappers for different overloads of the same op never collide.
    return f.func.name.unambiguous_name()
|
102 |
+
|
103 |
+
|
104 |
+
# Convert all the arguments in a NativeFunction to C++ code
|
105 |
+
def convert_arguments(f: NativeFunction) -> Tuple[List[Binding], List[str]]:
    """Generate C++ that pops and unboxes all of `f`'s arguments from the stack.

    Returns (bindings, code): `bindings` are the unboxed variables (renamed to
    each conversion's output variable) to pass to the unboxed kernel, and
    `code` is the list of C++ statements that produce them.
    """
    # we need the 'self' argument so method needs to be False
    args = (
        CppSignatureGroup.from_native_function(f, method=False)
        .most_faithful_signature()
        .arguments()
    )
    # first peek every IValue off the stack into a variable named after the arg
    code_list = [
        f"c10::IValue {args[i].name} = std::move(peek(stack, {i}, {len(args)}));"
        for i in range(len(args))
    ] + [""]
    binding_list = []
    for arg in args:
        # expecting only Argument (no TensorOptions/self-argument wrappers here)
        if not isinstance(arg.argument, Argument):
            raise Exception(
                f"Unexpected argument type, expecting `Argument` but got {arg}"
            )
        argument: Argument = arg.argument
        unboxed_name, _, code, decl = argumenttype_ivalue_convert(
            argument.type,
            argument.name,
            mutable=argument.is_write,
        )
        # hoisted declarations must precede the conversion code that fills them
        code_list.extend(decl)
        code_list.extend(code)
        binding_list.append(arg.with_name(unboxed_name))
    return binding_list, code_list
|
133 |
+
|
134 |
+
|
135 |
+
# Takes in the type, name and mutability corresponding to an argument, and generates a tuple of:
|
136 |
+
# (1) the C++ code necessary to unbox the argument
|
137 |
+
# (2) A Binding corresponding to the newly created unboxed variable, including variable name and its CType
|
138 |
+
def argumenttype_ivalue_convert(
    t: Type, arg_name: str, *, mutable: bool = False
) -> Tuple[str, CType, List[str], List[str]]:
    """Generate C++ converting the IValue `arg_name` to its unboxed C++ type.

    Returns (out_name, ctype, code, decl): the converted variable's name and
    C++ type, the conversion statements, and declarations that must be hoisted
    to an enclosing scope.
    """
    # Unboxing is for mobile, which doesn't care about SymInts
    ctype = cpp.argumenttype_type(
        t=t, mutable=mutable, binds=arg_name, symint=False
    ).type

    if isinstance(t, BaseType):
        out_name = f"{arg_name}_base"
        code, decl = _gen_code_base_type(
            arg_name=arg_name, out_name=out_name, ctype=ctype
        )
        return out_name, ctype, code, decl
    if isinstance(t, OptionalType):
        out_name = f"{arg_name}_opt_out"
        code, decl = _gen_code_optional_type(
            arg_name=arg_name, out_name=out_name, t=t, ctype=ctype
        )
        return out_name, ctype, code, decl
    if isinstance(t, ListType):
        out_name = f"{arg_name}_list_out"
        code, decl = _gen_code_list_type(
            arg_name=arg_name, out_name=out_name, t=t, ctype=ctype
        )
        return out_name, ctype, code, decl
    raise Exception(f"Cannot handle type {t}. arg_name: {arg_name}")
|
170 |
+
|
171 |
+
|
172 |
+
def _gen_code_base_type(
|
173 |
+
arg_name: str, out_name: str, ctype: CType
|
174 |
+
) -> Tuple[List[str], List[str]]:
|
175 |
+
return [
|
176 |
+
f"{ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.to<{ctype.cpp_type(strip_ref=True)}>();"
|
177 |
+
], []
|
178 |
+
|
179 |
+
|
180 |
+
def _gen_code_optional_type(
    arg_name: str, out_name: str, t: OptionalType, ctype: CType
) -> Tuple[List[str], List[str]]:
    # Unwraps an optional IValue: if present, convert the payload with the
    # element type's converter (generated recursively below); otherwise
    # default-construct an empty optional.
    in_name = f"{arg_name}_opt_in"
    # recursively generate the conversion for the wrapped element type
    res_name, _, res_code, decl = argumenttype_ivalue_convert(t.elem, in_name)
    return (
        f"""
c10::optional<c10::IValue> {arg_name}_opt = {arg_name}.toOptional<c10::IValue>();
{ctype.cpp_type(strip_ref=True)} {out_name};
if ({arg_name}_opt.has_value()) {{
    const c10::IValue {in_name} = {arg_name}_opt.value();
    {connector.join(res_code)}
    {out_name} = {ctype.cpp_type(strip_ref=True)}({res_name});
}} else {{
    {out_name} = {ctype.cpp_type(strip_ref=True)}();
}}
        """.split(
            "\n"
        ),
        decl,
    )
|
201 |
+
|
202 |
+
|
203 |
+
def _gen_code_list_type(
    arg_name: str, out_name: str, t: ListType, ctype: CType
) -> Tuple[List[str], List[str]]:
    # Converts a list IValue by unrolling it and converting each element with
    # the element type's converter, then materializing whichever container the
    # kernel expects (fixed-size array, c10::List, or ArrayRef over a vector).
    in_name = f"{arg_name}_list_in"
    elem_name = f"{arg_name}_elem"
    code = [f"const c10::List<c10::IValue> {in_name} = {arg_name}.toList();"]
    res_name, res_ctype, res_code, decl = argumenttype_ivalue_convert(t.elem, elem_name)
    # handle list type with size, e.g., bool[4]
    if isinstance(t.elem, BaseType) and t.elem.name == BaseTy.bool and t.size:
        code.extend(
            f"""
{ctype.cpp_type(strip_ref=True)} {out_name} = as_array<{res_ctype.cpp_type(strip_ref=True)}, {t.size}>({in_name});
            """.split(
                "\n"
            )
        )
    # we have to use c10::List for optional element. e.g., Tensor?[] -> c10::List<c10::optional<at::Tensor>>
    elif isinstance(t.elem, OptionalType):
        code.extend(
            f"""
{ctype.cpp_type(strip_ref=True)} {out_name};
for (c10::IValue {elem_name}: {in_name}) {{
    {connector.join(res_code)}
    {out_name}.push_back({res_name});
}}
            """.split(
                "\n"
            )
        )
    else:
        # use ArrayRef as default.
        vec_name = arg_name + "_vec"
        # need to bring vector instantiation out of scope so that ArrayRef has valid data
        decl.append(f"std::vector<{res_ctype.cpp_type(strip_ref=True)}> {vec_name};")
        code.extend(
            f"""
for (c10::IValue {elem_name}: {in_name}) {{
    {connector.join(res_code)}
    {vec_name}.push_back({res_name});
}}
{ctype.cpp_type(strip_ref=True)} {out_name}({vec_name});
            """.split(
                "\n"
            )
        )
    return code, decl
|
venv/lib/python3.10/site-packages/torchgen/code_template.py
ADDED
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import re
|
2 |
+
from typing import Mapping, Match, Optional, Sequence
|
3 |
+
|
4 |
+
# match $identifier or ${identifier} and replace with value in env
|
5 |
+
# If this identifier is at the beginning of whitespace on a line
|
6 |
+
# and its value is a list then it is treated as
|
7 |
+
# block substitution by indenting to that depth and putting each element
|
8 |
+
# of the list on its own line
|
9 |
+
# if the identifier is on a line starting with non-whitespace and a list
|
10 |
+
# then it is comma separated ${,foo} will insert a comma before the list
|
11 |
+
# if this list is not empty and ${foo,} will insert one after.
|
12 |
+
|
13 |
+
|
14 |
+
class CodeTemplate:
    """A small ``$identifier`` / ``${identifier}`` substitution engine.

    If a placeholder starts a line (preceded only by whitespace) and its value
    is a list, the elements are emitted one per line at that indentation
    (block substitution).  Otherwise list values are joined with ", ";
    ``${,foo}`` prepends a comma when the list is non-empty and ``${foo,}``
    appends one.
    """

    # group 1: optional leading indent (present only when the placeholder
    # starts a line); group 2: the identifier, possibly brace-wrapped with
    # leading/trailing comma markers
    substitution_str = r"(^[^\n\S]*)?\$([^\d\W]\w*|\{,?[^\d\W]\w*\,?})"
    substitution = re.compile(substitution_str, re.MULTILINE)

    pattern: str
    filename: str

    @staticmethod
    def from_file(filename: str) -> "CodeTemplate":
        """Load a template from a file, remembering the filename for debugging."""
        with open(filename) as f:
            return CodeTemplate(f.read(), filename)

    def __init__(self, pattern: str, filename: str = "") -> None:
        self.pattern = pattern
        self.filename = filename

    def substitute(
        self, env: Optional[Mapping[str, object]] = None, **kwargs: object
    ) -> str:
        """Render the template; ``kwargs`` take precedence over ``env``."""
        if env is None:
            env = {}

        def lookup(v: str) -> object:
            assert env is not None
            return kwargs[v] if v in kwargs else env[v]

        def indent_lines(indent: str, v: Sequence[object]) -> str:
            # emit each element on its own line at the captured indentation;
            # multi-line elements are re-indented line by line
            return "".join(
                [indent + l + "\n" for e in v for l in str(e).splitlines()]
            ).rstrip()

        def replace(match: Match[str]) -> str:
            indent = match.group(1)
            key = match.group(2)
            comma_before = ""
            comma_after = ""
            if key[0] == "{":
                # strip braces and record any leading/trailing comma markers
                key = key[1:-1]
                if key[0] == ",":
                    comma_before = ", "
                    key = key[1:]
                if key[-1] == ",":
                    comma_after = ", "
                    key = key[:-1]
            v = lookup(key)
            if indent is not None:
                # line-start placeholder: block substitution
                if not isinstance(v, list):
                    v = [v]
                return indent_lines(indent, v)
            elif isinstance(v, list):
                middle = ", ".join([str(x) for x in v])
                if len(v) == 0:
                    # empty list: suppress the commas entirely
                    return middle
                return comma_before + middle + comma_after
            else:
                return str(v)

        return self.substitution.sub(replace, self.pattern)
|
72 |
+
|
73 |
+
|
74 |
+
if __name__ == "__main__":
    # Smoke test: exercises block substitution ($bar at two indents), inline
    # substitution ($args, $a+$b), and the comma forms (${,stuff} non-empty,
    # ${,empty,} empty — the commas should be suppressed for the empty list).
    c = CodeTemplate(
        """\
int foo($args) {

    $bar
        $bar
    $a+$b
}
int commatest(int a${,stuff})
int notest(int a${,empty,})
"""
    )
    print(
        c.substitute(
            args=["hi", 8],
            bar=["what", 7],
            a=3,
            b=4,
            stuff=["things...", "others"],
            empty=[],
        )
    )
|
venv/lib/python3.10/site-packages/torchgen/context.py
ADDED
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import contextlib
|
2 |
+
|
3 |
+
import functools
|
4 |
+
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, TypeVar, Union
|
5 |
+
|
6 |
+
import torchgen.local as local
|
7 |
+
from torchgen.model import (
|
8 |
+
BackendIndex,
|
9 |
+
DispatchKey,
|
10 |
+
NativeFunction,
|
11 |
+
NativeFunctionsGroup,
|
12 |
+
NativeFunctionsViewGroup,
|
13 |
+
)
|
14 |
+
from torchgen.utils import context, S, T
|
15 |
+
|
16 |
+
# Helper functions for defining generators on things in the model
|
17 |
+
|
18 |
+
F = TypeVar(
|
19 |
+
"F",
|
20 |
+
NativeFunction,
|
21 |
+
NativeFunctionsGroup,
|
22 |
+
NativeFunctionsViewGroup,
|
23 |
+
Union[NativeFunction, NativeFunctionsGroup],
|
24 |
+
Union[NativeFunction, NativeFunctionsViewGroup],
|
25 |
+
)
|
26 |
+
|
27 |
+
F2 = TypeVar(
|
28 |
+
"F2",
|
29 |
+
NativeFunction,
|
30 |
+
NativeFunctionsGroup,
|
31 |
+
Optional[NativeFunction],
|
32 |
+
bool,
|
33 |
+
str,
|
34 |
+
)
|
35 |
+
|
36 |
+
F3 = TypeVar("F3", Tuple[NativeFunction, Any], List[NativeFunction])
|
37 |
+
|
38 |
+
|
39 |
+
@contextlib.contextmanager
def native_function_manager(
    g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup, NativeFunction]
) -> Iterator[None]:
    """Scope error reporting and `torchgen.local` settings to one native
    function (or to a representative function of a group)."""
    if isinstance(g, NativeFunctionsGroup):
        # By default, we associate all errors with structured native functions
        # with the out variant. In some cases, it might be better to have
        # a more specific place to hang things; if so, use
        # native_function_manager again on the inside
        f = g.out
    elif isinstance(g, NativeFunctionsViewGroup):
        # We associate errors with the view operator
        f = g.view
    else:
        f = g
    # `context` decorates any raised exception with the YAML location of the
    # offending function; `local.parametrize` sets the thread-local codegen
    # knobs that the api modules read while we are inside the block.
    with context(lambda: f"in native_functions.yaml line {f.loc}:\n {f.func}"):
        with local.parametrize(
            use_const_ref_for_mutable_tensors=f.use_const_ref_for_mutable_tensors,
            use_ilistref_for_tensor_lists=f.part_of_structured_group,
        ):
            yield
|
60 |
+
|
61 |
+
|
62 |
+
# Given a function that operates on NativeFunction, wrap it into a new function
|
63 |
+
# that sets some appropriate context managers for that native function.
|
64 |
+
# YOU MUST WRAP FUNCTIONS IN THIS for calls to api modules to be sound
|
65 |
+
# (you will get an error if we try to access the local variables without having
|
66 |
+
# set them).
|
67 |
+
def with_native_function(func: Callable[[F], T]) -> Callable[[F], T]:
    """Decorator: run ``func`` with the native-function context managers set.

    Required for calls into the api modules to be sound (they read
    thread-local state established by native_function_manager).
    """

    @functools.wraps(func)
    def inner(f: F) -> T:
        with native_function_manager(f):
            return func(f)

    return inner
|
74 |
+
|
75 |
+
|
76 |
+
def with_native_function_and(func: Callable[[F, F2], T]) -> Callable[[F, F2], T]:
    """Two-argument variant of with_native_function.

    The first argument is assumed to carry the appropriate context; the
    second is passed through untouched.
    """

    @functools.wraps(func)
    def inner(f: F, f2: F2) -> T:
        with native_function_manager(f):
            return func(f, f2)

    return inner
|
84 |
+
|
85 |
+
|
86 |
+
def method_with_native_function(func: Callable[[S, F], T]) -> Callable[[S, F], T]:
    """Like with_native_function, but for methods: contexts come from the
    second parameter (the native function), not from ``self``."""

    @functools.wraps(func)
    def inner(slf: S, f: F) -> T:
        with native_function_manager(f):
            return func(slf, f)

    return inner
|
93 |
+
|
94 |
+
|
95 |
+
def method_with_nested_native_function(
    func: Callable[[S, F3], T]
) -> Callable[[S, F3], T]:
    """Method decorator where the native function is nested inside the
    argument: the context is taken from ``f[0]``."""

    @functools.wraps(func)
    def inner(slf: S, f: F3) -> T:
        with native_function_manager(f[0]):
            return func(slf, f)

    return inner
|
104 |
+
|
105 |
+
|
106 |
+
# Convenience decorator for functions that explicitly take in a BackendIndex,
|
107 |
+
# instead of indirectly taking one in as a closure
|
108 |
+
def with_native_function_and_index(
    func: Callable[[F, BackendIndex], T]
) -> Callable[[F, BackendIndex], T]:
    """Convenience decorator for functions that explicitly take a
    BackendIndex instead of capturing one in a closure."""

    @functools.wraps(func)
    def inner(f: F, backend_index: BackendIndex) -> T:
        with native_function_manager(f):
            return func(f, backend_index)

    return inner
|
117 |
+
|
118 |
+
|
119 |
+
# Convenience decorator for functions that explicitly take in a Dict of BackendIndices
|
120 |
+
def with_native_function_and_indices(
    func: Callable[[F, Dict[DispatchKey, BackendIndex]], T]
) -> Callable[[F, Dict[DispatchKey, BackendIndex]], T]:
    """Convenience decorator for functions that explicitly take a Dict of
    BackendIndices instead of capturing it in a closure."""

    @functools.wraps(func)
    def inner(f: F, backend_indices: Dict[DispatchKey, BackendIndex]) -> T:
        with native_function_manager(f):
            return func(f, backend_indices)

    return inner
|
venv/lib/python3.10/site-packages/torchgen/dest/__init__.py
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from .lazy_ir import (
|
2 |
+
generate_non_native_lazy_ir_nodes as generate_non_native_lazy_ir_nodes,
|
3 |
+
GenLazyIR as GenLazyIR,
|
4 |
+
GenLazyNativeFuncDefinition as GenLazyNativeFuncDefinition,
|
5 |
+
GenLazyShapeInferenceDefinition as GenLazyShapeInferenceDefinition,
|
6 |
+
)
|
7 |
+
from .native_functions import (
|
8 |
+
compute_native_function_declaration as compute_native_function_declaration,
|
9 |
+
)
|
10 |
+
from .register_dispatch_key import (
|
11 |
+
gen_registration_headers as gen_registration_headers,
|
12 |
+
gen_registration_helpers as gen_registration_helpers,
|
13 |
+
RegisterDispatchKey as RegisterDispatchKey,
|
14 |
+
)
|
15 |
+
from .ufunc import (
|
16 |
+
compute_ufunc_cpu as compute_ufunc_cpu,
|
17 |
+
compute_ufunc_cpu_kernel as compute_ufunc_cpu_kernel,
|
18 |
+
compute_ufunc_cuda as compute_ufunc_cuda,
|
19 |
+
)
|
venv/lib/python3.10/site-packages/torchgen/dest/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (668 Bytes). View file
|
|
venv/lib/python3.10/site-packages/torchgen/dest/__pycache__/lazy_ir.cpython-310.pyc
ADDED
Binary file (23.4 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/dest/__pycache__/lazy_ts_lowering.cpython-310.pyc
ADDED
Binary file (2.19 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/dest/__pycache__/native_functions.cpython-310.pyc
ADDED
Binary file (2.24 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/dest/__pycache__/register_dispatch_key.cpython-310.pyc
ADDED
Binary file (23.9 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/dest/__pycache__/ufunc.cpython-310.pyc
ADDED
Binary file (14 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/dest/lazy_ir.py
ADDED
@@ -0,0 +1,707 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import itertools
|
2 |
+
from abc import ABC
|
3 |
+
from dataclasses import dataclass
|
4 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
5 |
+
|
6 |
+
import torchgen.api.dispatcher as dispatcher
|
7 |
+
from torchgen.api.lazy import (
|
8 |
+
getValueT,
|
9 |
+
isValueType,
|
10 |
+
LazyArgument,
|
11 |
+
LazyIrProperties,
|
12 |
+
LazyIrSchema,
|
13 |
+
tensorListValueT,
|
14 |
+
)
|
15 |
+
from torchgen.api.translate import translate
|
16 |
+
from torchgen.api.types import (
|
17 |
+
BaseCType,
|
18 |
+
Binding,
|
19 |
+
deviceT,
|
20 |
+
DispatcherSignature,
|
21 |
+
kernel_signature,
|
22 |
+
NativeSignature,
|
23 |
+
OptionalCType,
|
24 |
+
VectorCType,
|
25 |
+
)
|
26 |
+
from torchgen.context import method_with_native_function
|
27 |
+
from torchgen.dest.lazy_ts_lowering import ts_lowering_body
|
28 |
+
from torchgen.model import (
|
29 |
+
Argument,
|
30 |
+
BackendIndex,
|
31 |
+
BackendMetadata,
|
32 |
+
BaseTy,
|
33 |
+
BaseType,
|
34 |
+
FunctionSchema,
|
35 |
+
ListType,
|
36 |
+
NativeFunction,
|
37 |
+
NativeFunctionsGroup,
|
38 |
+
)
|
39 |
+
|
40 |
+
|
41 |
+
def node_ctor_arg_rvalue_string(arg: LazyArgument) -> str:
    """
    Given a LazyArgument, produce the C++ expression that materializes an
    rvalue of that argument for passing into a lazy Node constructor.
    """
    # TODO: Matching on CType seems wrong; should be matching on Type
    if isValueType(arg.lazy_type):
        if isinstance(arg.lazy_type, BaseCType):
            # Wrapped scalars were already turned into IR values named node_<arg>.
            if arg.is_wrapped_scalar:
                return f"node_{arg.name}"
            if arg.lazy_type.type is tensorListValueT:
                return f"lazy_{arg.name}_tensorlist"
            if arg.is_symint_or_list:
                return f"GetSymIntValue({arg.name})"
            return f"lazy_{arg.name}->GetIrValue()"
        if isinstance(arg.lazy_type, OptionalCType):
            if arg.is_symint_or_list:
                # TODO: I don't understand when you should put lazy_ in the name
                # or not
                return f"{arg.name} ? c10::make_optional(GetSymIntValue(*{arg.name})) : c10::nullopt"
            if arg.is_wrapped_scalar:
                return f"node_{arg.name}"
            return (
                f"lazy_{arg.name} ? "
                f"c10::make_optional(lazy_{arg.name}->GetIrValue()) : "
                "c10::nullopt"
            )
        raise AssertionError(
            f"TODO not sure if there are other valid types to handle here ({arg.lazy_type})"
        )

    # NB: this is here because right now we aren't treating SymInt[] as a
    # value type; when we do this needs to move above
    # NB: we cannot test arg.lazy_type as we've already specified it is an
    # int64_t and so we cannot distinguish between SymInt and int64_t
    if isinstance(arg.orig_type, ListType) and arg.orig_type.elem == BaseType(
        BaseTy.SymInt
    ):
        if arg.symint:
            return f"GetSymIntArrayRefValue({arg.name})"
        return f"std::vector<int64_t>({arg.name}.begin(), {arg.name}.end())"
    if isinstance(arg.lazy_type, VectorCType) and isinstance(
        arg.lazy_type.elem, BaseCType
    ):
        return f"std::vector<{arg.lazy_type.elem.type}>({arg.name}.begin(), {arg.name}.end())"
    if (
        isinstance(arg.lazy_type, OptionalCType)
        and isinstance(arg.lazy_type.elem, VectorCType)
        and isinstance(arg.lazy_type.elem.elem, BaseCType)
    ):
        return f"torch::lazy::ToOptionalVector<{arg.lazy_type.elem.elem.type}>({arg.name})"
    # Plain scalars are forwarded by name.
    return f"{arg.name}"
|
98 |
+
|
99 |
+
|
100 |
+
def node_ctor_inputs(schema: LazyIrSchema) -> str:
    """
    Produce a formatted string with the arguments as passed into the constructor of a node class.
    """
    return ", ".join(
        node_ctor_arg_rvalue_string(arg) for arg in schema.filtered_args()
    )
|
108 |
+
|
109 |
+
|
110 |
+
def gen_fallback_code(
    schema: LazyIrSchema,
    sig: Union[DispatcherSignature, NativeSignature],
    overload_name: str,
) -> str:
    """
    Generate code that falls back to eager conditioned on a predicate.

    The emitted C++ guards the lazy kernel body: when force_eager_fallback()
    is true for this op's symbol, it dispatches straight to the eager kernel
    via call_fallback_fn_symint and returns.
    """
    dispatcher_sig = DispatcherSignature.from_schema(schema.func)
    exprs = translate(sig.arguments(), dispatcher_sig.arguments())
    fallback_args = ",\n                ".join([a.expr for a in exprs])
    # Overloaded ops need the two-argument ATEN_OP2 macro to disambiguate.
    if len(overload_name):
        aten_op_str = f"ATEN_OP2({schema.aten_name}, {overload_name})"
    else:
        aten_op_str = f"ATEN_OP({schema.aten_name})"
    # BUGFIX: the template previously emitted `<<c_eager_fallback` (a garbled
    # copy of `<&ltc_eager_fallback`), which is not valid C++. The template
    # parameter must be the address of the ltc_eager_fallback handler.
    return f"""
        if (force_eager_fallback({aten_symbol(schema)})) {{
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, {aten_op_str}>::call(
                {fallback_args}
            );
        }}
"""
|
132 |
+
|
133 |
+
|
134 |
+
def aten_symbol(schema: LazyIrSchema) -> str:
    """Return the C++ expression naming the ATen symbol for this schema's op.

    A few ops are absent from the generated interned-strings table; for those
    the symbol is built from its qualified string at runtime instead.
    """
    missing_interned_strings = {
        "sigmoid_backward",
    }
    name = schema.aten_name
    if name in missing_interned_strings:
        return f'c10::Symbol::fromQualString("aten::{name}")'
    # Already-qualified names pass through untouched.
    return name if name.startswith("at::") else f"at::aten::{name}"
|
145 |
+
|
146 |
+
|
147 |
+
def convert_to_meta_tensors(sig: DispatcherSignature) -> Tuple[str, List[Binding]]:
    """Convert all tensor-like arguments to meta tensors.

    Returns:
        (1) a string containing all of the logic that does the conversions.
        (2) a context, to be used by translate(), with all of the relevant bindings.
    """
    context: List[Binding] = []
    conversions: List[str] = []
    for binding in sig.arguments():
        inner = binding.argument
        if isinstance(inner, Argument) and inner.type.is_tensor_like():
            meta_name = f"{binding.name}_meta"
            conversions.append(f"auto {meta_name} = to_meta({binding.name});")
            # translate() must see the meta-converted name for this arg.
            context.append(binding.with_name(meta_name))
        else:
            context.append(binding)
    return "\n        ".join(conversions), context
|
164 |
+
|
165 |
+
|
166 |
+
@dataclass(frozen=True)
class GenLazyIR(ABC):
    """Generates the C++ IR node class declaration for each native function.

    Backend-agnostic base. Subclasses (e.g. GenTSLazyIR) override the
    create/can-be-reused/lowering hooks to add backend-specific members to
    the emitted class. All output is C++ source text built from f-string
    templates, so the exact whitespace in the templates is load-bearing.
    """

    backend_index: BackendIndex
    backend_name: str
    # Name of the C++ base class the generated node derives from.
    node_base: str
    # Whether the node constructor accepts precomputed torch::lazy::Shape values.
    use_lazy_shape: bool

    @method_with_native_function
    def __call__(self, f: Union[NativeFunctionsGroup, NativeFunction]) -> List[str]:
        """Build a LazyIrSchema from the functional variant of ``f`` and generate its node class."""
        func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func
        metadata = self.backend_index.get_kernel(
            f.functional if isinstance(f, NativeFunctionsGroup) else f
        )
        schema = LazyIrSchema(
            func, symint=metadata is not None and metadata.supports_symint()
        )
        return self.gen(schema)

    # there is no lowering functionality generated unless this IR base class is subclassed and
    # implemented as a backend-specific node
    def lowering_function(self, schema: LazyIrSchema) -> str:
        return ""

    def create_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
        # Hook: backends may emit a static Create() factory; none by default.
        return ""

    def can_be_reused_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
        # Default: generated nodes are never reused from the node cache.
        return f"""bool CanBeReused({node_ctor_args}) const {{
    return false;
    }}"""

    def node_base_ctor_call(self, schema: LazyIrSchema) -> str:
        """Emit the call to the node base-class constructor (C++ text)."""
        value_args = schema.filtered_args(values=True, scalars=False)
        # backends can customize the way the node base class constructor is called,
        # as long as all of its arguments can be generated from information available from the schema
        base_ctor_value_args_list = []
        for arg in value_args:
            if isinstance(arg.lazy_type, (BaseCType, VectorCType)):
                base_ctor_value_args_list.append(f"{arg.name}")
            elif isinstance(arg.lazy_type, OptionalCType):
                # Absent optional values are represented by kNullValue.
                base_ctor_value_args_list.append(f"{arg.name}.value_or(kNullValue)")
            else:
                raise AssertionError(
                    f"Unsupported type ({arg.lazy_type}) - add support if necessary"
                )
        base_ctor_value_args = ", ".join(base_ctor_value_args_list)

        scalar_args = schema.filtered_args(values=False, scalars=True)

        # Shape construction.
        # Conditionally build shape depending on specified shape property
        if schema.properties.ShapePrecompute:
            # Caller precomputed the shapes and passes them into the ctor.
            shape_ctor_arg = "std::move(shapes),"
        elif schema.properties.ShapeCompute:
            # Compute shapes eagerly inside the ctor call.
            shape_args = [a.name for a in value_args]
            shape_args.extend(a.name for a in scalar_args)
            shape_ctor_arg = f"compute_shape_{schema.name}({', '.join(shape_args)}),"
        elif schema.properties.ShapeCache:
            # Defer shape computation behind a lambda (cached by the base class).
            shape_args = [f"operand({i})" for i in range(len(value_args))]
            shape_args.extend(a.name for a in scalar_args)
            shape_ctor_arg = f"[&](){{ return compute_shape_{schema.name}({', '.join(shape_args)})[0]; }},"
        else:
            shape_ctor_arg = ""

        # Scalars participate in the node hash so cache lookups distinguish them.
        scalar_hashes = ", ".join(f"{a.name}" for a in scalar_args)

        return f"""{self.node_base}(
              {schema.node_name}::ClassOpKind(),
              OpList{{{base_ctor_value_args}}},
              {shape_ctor_arg}
              /* num_outputs */ {len(schema.returns)},
              torch::lazy::MHash({scalar_hashes}))"""

    def gen(self, schema: LazyIrSchema) -> List[str]:
        """Return a one-element list holding the full C++ class declaration text."""
        opkind = schema.opkind or aten_symbol(schema)

        # for now, we just want one IR class decl and soon after also the method defs
        # and we use the functional version not out/inplace.
        all_args = schema.filtered_args()
        value_args = schema.filtered_args(values=True, scalars=False)
        scalar_args = schema.filtered_args(values=False, scalars=True)

        # Ctor takes every arg by const-ref; the reuse signature excludes the
        # trailing shapes parameter that may be appended below.
        ctor_args = [f"const {i.lazy_type.cpp_type()}& {i.name}" for i in all_args]
        reuse_ctor_args = ", ".join(ctor_args)
        if self.use_lazy_shape and schema.properties.ShapePrecompute:
            ctor_args.append("std::vector<torch::lazy::Shape>&& shapes")
        node_ctor_args = ", ".join(ctor_args)

        scalar_initializers = ",\n        ".join(
            [
                # This code is just special casing the mapping from string_view -> strings
                f"{a.name}({a.name}.has_value() ? c10::make_optional(std::string(*{a.name})) : c10::nullopt)"
                if a.lazy_type.cpp_type() == "c10::optional<c10::string_view>"
                else f"{a.name}({a.name})"
                for a in scalar_args
            ]
        )
        if len(scalar_initializers):
            scalar_initializers = f",\n        {scalar_initializers}"
        # string_view members are stored as owned std::string in the node.
        scalar_decls = "\n  ".join(
            [
                f"std::string {a.name};"
                if a.lazy_type.cpp_type() == "c10::string_view"
                else f"c10::optional<std::string> {a.name};"
                if a.lazy_type.cpp_type() == "c10::optional<c10::string_view>"
                else f"{a.lazy_type.cpp_type()} {a.name};"
                for a in scalar_args
            ]
        )
        optional_values = [
            arg.name
            for arg in schema.filtered_args(values=True, scalars=False)
            if isinstance(arg.lazy_type, OptionalCType)
        ]
        # One bitfield flag per optional value input, recording presence.
        has_optional_decls = "\n  ".join(
            [f"bool has_{value}: 1;" for value in optional_values]
        )
        has_optional_defs = "\n    ".join(
            [f"has_{value} = !!{value};" for value in optional_values]
        )
        members_to_string = []
        for arg in scalar_args:
            if isinstance(arg.lazy_type, OptionalCType):
                value = f"{arg.name}.value()"
                if arg.is_generator:
                    # Generators have no useful printable value.
                    value = '"torch.Generator()"'
                members_to_string.append(
                    f"""if ({arg.name}.has_value()) {{
      ss << ", {arg.name}=" << {value};
    }} else {{
      ss << ", {arg.name}=null";
    }}"""
                )
            else:
                members_to_string.append(f'ss << ", {arg.name}=" << {arg.name};')
        members_to_string_str = "\n    ".join(members_to_string)

        return [
            f"""\
class {schema.node_name} : public {self.node_base} {{
 public:
  static torch::lazy::OpKind ClassOpKind() {{
    return torch::lazy::OpKind({opkind});
  }}

  {schema.node_name}({node_ctor_args})
      : {self.node_base_ctor_call(schema)}{scalar_initializers}
  {{
    {has_optional_defs}
  }}

  std::string ToString() const override {{
    std::stringstream ss;
    ss << {self.node_base}::ToString();
    {members_to_string_str}
    return ss.str();
  }}

  {self.create_function(schema, reuse_ctor_args)}

  {self.can_be_reused_function(schema, reuse_ctor_args)}

  {self.lowering_function(schema)}

  {scalar_decls}
  {has_optional_decls}

}};

""",
        ]
|
337 |
+
|
338 |
+
|
339 |
+
@dataclass(frozen=True)
class GenTSLazyIR(GenLazyIR):
    """TorchScript-backend specialization of GenLazyIR.

    Fills in the three hooks with TS-specific C++: a Lower() override, a
    static Create() factory, and a CanBeReused() comparison.
    """

    def lowering_function(self, schema: LazyIrSchema) -> str:
        """Emit the Lower() override, a declaration only, or nothing, per schema properties."""
        signature = """
  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override"""

        if schema.properties.LowerDeclOnly:
            return f"{signature};"
        elif schema.properties.Lower:
            return f"""{signature} {{
    {ts_lowering_body(schema)}
  }}
            """
        else:
            return ""

    def create_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
        """Emit the static Create() factory (or its declaration) per schema properties."""
        signature = f"static NodePtr Create({node_ctor_args})"
        if schema.properties.CreateFnDeclOnly:
            return f"{signature};"
        elif not schema.properties.CreateFn:
            return ""
        # NOTE(review): `data` is not among the generated parameters here; it
        # presumably comes from surrounding generated/handwritten code at the
        # emission site — confirm before relying on this path.
        return f"""{signature} {{
    return ReuseOrMakeNode<{schema.node_name}>(data);
  }}"""

    def can_be_reused_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
        """Emit CanBeReused(), comparing each operand/scalar against this node's members."""
        signature = f"bool CanBeReused({node_ctor_args}) const"
        if schema.properties.CanBeReusedDeclOnly:
            return f"{signature};"
        elif not schema.properties.CanBeReused:
            return ""
        value_comparison = []
        # Value inputs are compared positionally via operand(i++); optional
        # values compare against kNullValue when absent.
        for arg in itertools.chain(schema.positional_values, schema.keyword_values):
            if isinstance(arg.lazy_type, OptionalCType):
                value_comparison.append(
                    f"nullable_operand(i++) == {arg.name}.value_or(kNullValue)"
                )
            else:
                value_comparison.append(f"operand(i++) == {arg.name}")
        # Scalars compare member-to-argument; optionals must match in
        # presence and (when present) in value.
        for arg in itertools.chain(schema.positional_scalars, schema.keyword_scalars):
            if isinstance(arg.lazy_type, OptionalCType):
                value_comparison.append(
                    f"((!this->{arg.name}&&!{arg.name}) || (this->{arg.name}&&{arg.name} && *(this->{arg.name}) == *{arg.name}))"
                )
            else:
                value_comparison.append(f"this->{arg.name} == {arg.name}")
        value_comparison_str = " &&\n        ".join(value_comparison)

        return f"""{signature} {{
    size_t i = 0;
    return ({value_comparison_str});
  }}"""
|
394 |
+
|
395 |
+
|
396 |
+
@dataclass(frozen=True)
class GenLazyNativeFuncDefinition:
    """Generates the C++ kernel definition registered for a lazy backend.

    Each generated kernel (see __call__): optionally falls back to eager,
    bumps a metrics counter, resolves the common lazy device, wraps tensor
    inputs as lazy tensors, builds or reuses the IR node, and returns aten
    tensor(s) wrapping the node's output(s). The string-valued fields below
    are backend-supplied names spliced into the emitted C++.
    """

    class_method_name: str
    backend_index: BackendIndex
    tensor_class: str
    gen_forced_fallback_code: bool
    # Backend namespace + helper-function names used in the emitted C++.
    backend_namespace: str
    get_tensorlist: str
    get_tensor_or_wrap_number: str
    try_get_tensor: str
    metrics_counter: str
    create_tensor: str
    # XLA-style backends create tensors via an instance method on the first input.
    create_from_first_tensor: bool
    create_aten_from_ltc_tensor: str
    tuple_aten_from_ltc_tensors: str
    lazy_tensor_ptr: str
    get_device_fn: str

    def lazy_tensor_decls(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        """Emit declarations wrapping each value input as a lazy tensor / IR value."""
        value_args = schema.filtered_args(values=True, scalars=False)
        # Generates lazy_{name} variables for LazyTensors wrapping input tensors
        lazy_tensor_decls: List[str] = []
        for arg in value_args:
            if arg.is_wrapped_scalar:
                # Wrapped scalars become IR values directly (node_{name}).
                if isinstance(arg.lazy_type, OptionalCType):
                    lazy_tensor_decls.append(
                        f"""auto node_{arg.name} = {arg.name} ?
                c10::make_optional(torch::lazy::LazyGraphExecutor::Get()->
                    GetIrValueForScalarFromCodegen(*{arg.name}, *common_device)):
                c10::nullopt;"""
                    )
                else:
                    lazy_tensor_decls.append(
                        f"""auto node_{arg.name} = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen({arg.name}, *common_device);"""
                    )
            elif arg.is_symint_or_list:
                continue  # values are extracted in isValueType
            elif isinstance(arg.lazy_type, BaseCType):
                if arg.lazy_type.type is tensorListValueT:
                    lazy_tensor_decls.append(
                        f"auto lazy_{arg.name}_tensorlist = "
                        f"{self.backend_namespace}::{self.get_tensorlist}({arg.name});"
                    )
                else:
                    lazy_tensor_decls.append(
                        f"{self.lazy_tensor_ptr} lazy_{arg.name} = "
                        f"{self.backend_namespace}::{self.get_tensor_or_wrap_number}({arg.name}, *common_device);"
                    )
            elif isinstance(arg.lazy_type, OptionalCType):
                assert arg.lazy_type.elem == BaseCType(getValueT()), arg.lazy_type.elem
                # TODO(alanwaketan): Maybe we want to apply GetLtcTensorOrCreateForWrappedNumber here, but hold it
                # until we encounter a real world example.
                lazy_tensor_decls.append(
                    f"{self.lazy_tensor_ptr} lazy_{arg.name} = "
                    f"{self.backend_namespace}::{self.try_get_tensor}({arg.name}.value_or(at::Tensor()));"
                )
            else:
                raise AssertionError(
                    f"TODO not sure if there are other valid types to handle here ({arg.lazy_type})"
                )
        return ("\n        ").join(lazy_tensor_decls)

    def force_eager_fallback(
        self,
        func: NativeFunction,
        schema: LazyIrSchema,
        metadata: BackendMetadata,
        sig: Union[DispatcherSignature, NativeSignature],
    ) -> str:
        """Emit the eager-fallback guard, or nothing when the backend opts out."""
        if self.gen_forced_fallback_code:
            return gen_fallback_code(
                schema, sig, overload_name=func.func.name.overload_name
            )
        return ""

    def metrics(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        # Per-op counter bump; the counter expression is backend-supplied.
        return f"{self.metrics_counter};"

    def get_device(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        """Emit code resolving the common lazy device from inputs (and optional Device scalars)."""
        value_args = schema.filtered_args(values=True, scalars=False)
        scalar_args = schema.filtered_args(values=False, scalars=True)
        value_types_names = [f"{a.name}" for a in value_args if not a.is_wrapped_scalar]
        optional_device = OptionalCType(BaseCType(deviceT))
        optional_devices = [
            a.name for a in scalar_args if a.lazy_type == optional_device
        ]
        assert (
            len(value_types_names) > 0 or len(optional_devices) > 0
        ), "Expected at least one Value or Device type"
        get_device_str = (
            f"{self.get_device_fn}({', '.join(value_types_names + optional_devices)})"
        )
        return f"""auto common_device = {get_device_str};
        TORCH_INTERNAL_ASSERT(common_device);
        """

    def shape_inference(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        """Emit code computing output shapes, via meta kernels when possible."""
        metadata = self.backend_index.get_kernel(func)
        assert metadata is not None
        all_args = schema.filtered_args()
        returns_length = len(schema.returns)
        # call the meta kernel if it exists, to compute output shape/dtype for our IR
        # Note [Generated LTC Shape Functions]
        # LTC uses meta tensors from core to do shape inference when possible, and otherwise
        # we generate a shape function declaration that needs to be manually implemented.
        # How do we detect which ops are eligible to use meta tensors?
        # In general we should be able to use meta tensors not just on structured operators,
        # but also on composite operators that are implemented in terms of structured kernels.
        # We don't currently have a way of knowing at codegen time which ops are implemented that way.
        # This is the case for all view and view_copy operators however, so we're going to
        # use them specifically for all of the view_copy ops (instead of manually writing shape rules for all of them).
        is_view_copy_op = "view_copy" in func.tags
        is_structured = func.structured or func.structured_delegate is not None
        if is_structured or is_view_copy_op:
            meta_out = """
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};"""
            if returns_length > 1:

                def this_shape(i: int) -> str:
                    return f"torch::lazy::Shape(std::get<{i}>(out_meta).scalar_type(), std::get<{i}>(out_meta).sizes().vec())"

                shapes_str = ",".join([this_shape(i) for i in range(returns_length)])
                meta_out = "std::vector<torch::lazy::Shape> shapes{" + shapes_str + "};"

            # Convert tensor args to the meta device and call it.
            # (We can't pass in the input tensors directly, because they are "functional wrappers".
            # If any of the meta kernels call a tensor op and redispatch, we don't want to hit the functionalize kernels.)
            # Even at::meta:: functions might redispatch, e.g. if they call into view ops.
            dispatcher_sig = DispatcherSignature.from_schema(func.func)
            meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
            meta_call_args = [
                e.expr
                for e in translate(
                    meta_call_ctx, dispatcher_sig.arguments(), method=False
                )
            ]
            if is_view_copy_op:
                # view_copy ops always have a CompositeExplicitAutogradNonFunctional kernel
                assert func.has_composite_explicit_autograd_non_functional_kernel
                dispatch_ns = "compositeexplicitautogradnonfunctional"
            else:
                dispatch_ns = "meta"
            aten_name = schema.aten_name
            # TODO: this is trolling
            if func.func.has_symint() and metadata.supports_symint():
                aten_name += "_symint"
            shape_str = f"""\
        {meta_conversion_str}
        auto out_meta = at::{dispatch_ns}::{aten_name}({', '.join(meta_call_args)});
        {meta_out}"""
        else:
            # No meta kernel: call the hand-written compute_shape_* function.
            shape_sig = ComputeShapeSignature(
                metadata.kernel, func, symint=metadata.supports_symint()
            )
            shape_str = f"""
        auto shapes = {shape_sig.shape_call};"""

        shape_str += f"""
        TORCH_INTERNAL_ASSERT(shapes.size() == {returns_length});"""

        # Calculating which dimensions are symbolic
        func_schema_str = "aten::" + str(func.func)
        shape_str += f"""
        if(torch::lazy::symbolicShapeEnabled()){{
            std::vector<torch::jit::IValue> inputs = {{ {', '.join(str(a.name) for a in all_args)} }};
            const char* schema_str = "{func_schema_str}";
            applySymbolicShapesOnLT(schema_str, inputs, shapes);
        }}
        """
        return shape_str

    def build_ir_node(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        """Emit code that reuses a cached IR node or builds and caches a new one."""
        node_ctor_input_str = node_ctor_inputs(schema)
        return f"""torch::lazy::NodePtr node = torch::lazy::ReuseNode<{schema.node_name}>({node_ctor_input_str});
        if (!node) {{
            {self.shape_inference(func, schema)}
            node = torch::lazy::MakeNode<{schema.node_name}>({node_ctor_input_str}, std::move(shapes));
            CacheNode(node);
        }}
        """

    def create_lazy_tensor(self, first_tensor_name: Optional[str] = None) -> str:
        """Return the C++ callee used to construct a lazy tensor."""
        # xla uses an instance method for tensor creation, for the time being
        if self.create_from_first_tensor:
            # TODO(whc) remove this if XLA switches to using static method for creation
            assert (
                first_tensor_name is not None
            ), "Requires first tensor to create lazy tensor"
            return f"{first_tensor_name}.{self.create_tensor}"
        return f"{self.backend_namespace}::{self.create_tensor}"

    def return_aten_tensor(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        """Emit code converting the IR node's output(s) back into aten tensor(s)."""
        returns_length = len(schema.returns)
        value_args = schema.filtered_args(values=True, scalars=False)
        value_types_names = [f"{a.name}" for a in value_args if not a.is_wrapped_scalar]
        first_tensor_name = value_types_names[0] if len(value_types_names) > 0 else None
        bridge_str = f"""auto result = {self.create_aten_from_ltc_tensor}(
                {self.create_lazy_tensor(first_tensor_name)}(std::move(node), *common_device));"""

        if returns_length > 1:
            assert (
                len(value_types_names) > 0
            ), "Code below assumes there is at least one tensor arg"
            bridge_str = f"""std::vector<{self.lazy_tensor_ptr}> lazy_tensors;
        for (int i = 0; i < {returns_length}; i++) {{
            lazy_tensors.push_back({self.create_lazy_tensor(first_tensor_name)}({getValueT()}(node, i), *common_device));
        }}
        auto result = {self.tuple_aten_from_ltc_tensors}<{returns_length}>(lazy_tensors);"""

        if schema.name.name.inplace or func.func.is_out_fn():
            assert returns_length == 1, (
                "We assumed there was no such case where an op is an in-place variant "
                f"and has tuple outputs, but got tuple of len {returns_length}."
            )
            # In-place/out ops update the input tensor's IR value and return it.
            bridge_str = f"""lazy_{first_tensor_name}->SetInPlaceIrValue(node);
        auto& result = {first_tensor_name};"""

        bridge_str += """
        return result;"""
        return bridge_str

    @method_with_native_function
    def __call__(self, func: NativeFunction) -> List[str]:
        """Assemble the full kernel definition from the pieces above."""
        sig = kernel_signature(func, self.backend_index)
        metadata = self.backend_index.get_kernel(func)
        assert metadata is not None
        schema = LazyIrSchema(func.func, symint=metadata.supports_symint())
        return [
            f"""\
    {sig.decl(name=f"{self.class_method_name}::{metadata.kernel}")} {{
        {self.force_eager_fallback(func, schema, metadata, sig)}
        {self.metrics(func, schema)}
        {self.get_device(func, schema)}
        {self.lazy_tensor_decls(func, schema)}
        {self.build_ir_node(func, schema)}
        {self.return_aten_tensor(func, schema)}
    }}\n
    """
        ]
636 |
+
|
637 |
+
|
638 |
+
class ComputeShapeSignature:
    """
    Here we use the base name as the suffix of the signature to avoid generating for in-place variants.
    """

    def __init__(self, kernel_name: str, f: NativeFunction, *, symint: bool):
        self.__schema = LazyIrSchema(f.func, symint=symint)
        dispatch_params = dispatcher.arguments(f.func, symint=symint)
        self.__dispatch_args = ", ".join(param.decl() for param in dispatch_params)
        call_params = self.__schema.filtered_args(generator=True)
        self.__call_args = ", ".join(param.name for param in call_params)
        self.__kernel_name = kernel_name

    @property
    def shape_decl(self) -> str:
        """C++ declaration of the compute_shape_* function for this kernel."""
        return (
            "TORCH_API std::vector<torch::lazy::Shape> "
            f"compute_shape_{self.__kernel_name}({self.__dispatch_args})"
        )

    @property
    def shape_call(self) -> str:
        """C++ call expression invoking the compute_shape_* function."""
        return f"torch::lazy::compute_shape_{self.__kernel_name}({self.__call_args})"
|
666 |
+
|
667 |
+
|
668 |
+
@dataclass(frozen=True)
class GenLazyShapeInferenceDefinition:
    """Emits declarations for hand-written lazy shape-inference functions."""

    backend_index: BackendIndex
    tensor_class: str

    @method_with_native_function
    def __call__(self, f: NativeFunction) -> List[str]:
        sig = kernel_signature(f, self.backend_index)
        metadata = self.backend_index.get_kernel(f)
        assert metadata is not None

        # See Note [Generated LTC Shape Functions]
        # Structured ops and view_copy ops get shapes from meta kernels, so
        # only the remaining ops need a hand-written shape declaration.
        if "view_copy" in f.tags or f.structured or f.structured_delegate is not None:
            return []
        shape_sig = ComputeShapeSignature(
            metadata.kernel, f, symint=metadata.supports_symint()
        )
        return [f"{shape_sig.shape_decl};"]
|
689 |
+
|
690 |
+
|
691 |
+
def generate_non_native_lazy_ir_nodes(
    non_native: List[Dict[str, Any]], gen_lazy_ir: GenLazyIR
) -> List[str]:
    """Generate the non-native lazy IR node classes"""

    def build_node(entry: Dict[str, Any]) -> str:
        # Non-native IRs default to these properties unless overridden.
        props = LazyIrProperties("ShapeCache", "CanBeReused", "LowerDeclOnly")
        for prop_name in entry.get("properties", []):
            setattr(props, prop_name, True)

        # non-native is assumed to want symint bindings if you wrote symint
        schema = LazyIrSchema(
            FunctionSchema.parse(entry["func"]), props, symint=True
        )
        schema.opkind = entry.get("opkind")
        return gen_lazy_ir.gen(schema)[0]

    return [build_node(op) for op in non_native]
|
venv/lib/python3.10/site-packages/torchgen/dest/lazy_ts_lowering.py
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from torchgen.api.lazy import LazyArgument, LazyIrSchema
|
2 |
+
from torchgen.api.types import OptionalCType
|
3 |
+
|
4 |
+
|
5 |
+
def ts_lowering_body(schema: LazyIrSchema) -> str:
    """Emit the C++ body of a TorchScript Lower() method for this schema.

    The generated code collects positional arguments and keyword arguments
    into NamedValue vectors and hands them to LowerTSBuiltin.
    """
    # for now, we just want one IR class decl and soon after also the method defs
    # and we use the functional version not out/inplace.
    emplace_arguments = []

    def get_value(arg: LazyArgument) -> str:
        # Optional values consume an operand slot only when present
        # (guarded by the generated has_{name} flag).
        if isinstance(arg.lazy_type, OptionalCType):
            return f"has_{arg.name} ? loctx->GetOutputOp(operand(i++)) : nullptr"
        return "loctx->GetOutputOp(operand(i++))"

    for arg in schema.positional_args:
        if arg.is_lazy_value:
            emplace_arguments.append(get_value(arg))
            continue
        # Non-value positional args are passed as named scalars.
        emplace_arguments.append(f'"{arg.name}", {arg.name}')

    emplace_arguments_str = "\n    ".join(
        [f"arguments.emplace_back({a});" for a in emplace_arguments]
    )
    emplace_kwarg_values = [
        f'"{arg.name}", {get_value(arg)}' for arg in schema.keyword_values
    ]
    emplace_kwarg_scalars = [
        f'"{arg.name}", {arg.name}' for arg in schema.keyword_scalars
    ]
    emplace_kwarguments = "\n    ".join(
        [
            f"kwarguments.emplace_back({a});"
            for a in emplace_kwarg_values + emplace_kwarg_scalars
        ]
    )
    return f"""\
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve({len(emplace_arguments)});
    kwarguments.reserve({len(emplace_kwarg_values + emplace_kwarg_scalars)});
    size_t i = 0;
    {emplace_arguments_str}
    {emplace_kwarguments}
    torch::lazy::TSOpVector {schema.aten_name}_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ({schema.aten_name}_out.size(), {len(schema.returns)});

    return {schema.aten_name}_out;
    """
|