================================================================================================================================================
SOURCE CODE FILE: multi_threaded_pg.py
LINES: 1
SIZE: 18.98 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\multi_threaded_pg.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import sys
import threading
from dataclasses import dataclass
from typing import Optional, Union
from functools import partial, reduce
import torch
import torch.distributed as dist
import weakref
from torch._C._distributed_c10d import (
_create_work_from_future,
AllgatherOptions,
AllreduceOptions,
AllToAllOptions,
BarrierOptions,
BroadcastOptions,
ReduceScatterOptions,
ScatterOptions,
Store,
ReduceOp,
)
from torch.distributed.distributed_c10d import _CollOp, _store_based_barrier, P2POp
from torch.futures import Future
from torch.utils import _pytree as pytree
"""
TODO:
Lots of missing collectives.
Collectives validation.
Make timeout robust by making collectives respect the test deadline.
Make tests robust by making collectives interruptible.
We need some synchronization around cleanup to ensure that timedout ranks don't cause spurious failures.
"""
def flatten_list(lst):
return pytree.tree_leaves(lst)
def ret_work(ret):
fut = Future()
fut.set_result(ret)
return _create_work_from_future(fut)
def binop_reduce(tensors, op):
res = op(torch.stack(tensors), dim=0)
if isinstance(res, torch.Tensor):
return res
# min/max return a namedtuple
return res.values
def bitwise_reduce(tensors, op):
return reduce(op, tensors)
_reduce_ops = {
ReduceOp.SUM: partial(binop_reduce, op=torch.sum),
ReduceOp.AVG: partial(binop_reduce, op=torch.mean),
ReduceOp.PRODUCT: partial(binop_reduce, op=torch.prod),
ReduceOp.MIN: partial(binop_reduce, op=torch.min),
ReduceOp.MAX: partial(binop_reduce, op=torch.max),
ReduceOp.BAND: partial(bitwise_reduce, op=torch.bitwise_and),
ReduceOp.BOR: partial(bitwise_reduce, op=torch.bitwise_or),
ReduceOp.BXOR: partial(bitwise_reduce, op=torch.bitwise_xor),
}
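# For example, given tensors = [torch.tensor([1., 2.]), torch.tensor([3., 4.])]:
#   _reduce_ops[ReduceOp.SUM](tensors) -> tensor([4., 6.])  (stack along dim 0, then sum)
#   _reduce_ops[ReduceOp.MAX](tensors) -> tensor([3., 4.])  (torch.max returns a namedtuple; its .values is used)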
class AllToAll:
@torch.no_grad()
def work(self, data):
world_size = len(data)
for dest_rank in range(world_size):
output_tensor_list, _ = data[dest_rank]
for src_rank in range(world_size):
_, input_tensor_list = data[src_rank]
output_tensor_list[src_rank].copy_(input_tensor_list[dest_rank])
class AllToAllBase:
@torch.no_grad()
def work(self, data):
world_size = len(data)
for dest_rank in range(world_size):
output_buffer, _, output_split_sizes, _ = data[dest_rank]
output_indexes = self._size_cumsum(output_buffer.size(0), output_split_sizes, world_size)
for src_rank in range(world_size):
_, input_buffer, _, input_split_sizes = data[src_rank]
input_indexes = self._size_cumsum(input_buffer.size(0), input_split_sizes, world_size)
output_buffer[output_indexes[src_rank]:output_indexes[src_rank + 1]].copy_(
input_buffer[input_indexes[dest_rank]:input_indexes[dest_rank + 1]]
)
def _size_cumsum(self, buf_size: int, sizes: Union[torch.Tensor, list[int], None], world_size: int) -> torch.Tensor:
if sizes is None or len(sizes) == 0:
sizes = torch.full(
(world_size,), buf_size // world_size, dtype=torch.int64
)
if not isinstance(sizes, torch.Tensor):
sizes = torch.tensor(sizes, dtype=torch.int64)
assert sizes.dtype == torch.int64
sizes = torch.cumsum(
torch.cat(
(
torch.tensor([0], dtype=torch.int64, device=sizes.device), sizes
),
dim=0
),
dim=0
)
return sizes
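# For example, _size_cumsum(6, [1, 2, 3], 3) returns tensor([0, 1, 3, 6]); consecutive
# pairs of these offsets are used above to slice each rank's chunk out of the buffer.
# When sizes is None or empty, the buffer is split into world_size equal chunks of
# buf_size // world_size elements.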
class AllReduce:
def __init__(self, op):
if op.op not in _reduce_ops:
raise NotImplementedError(
f"AllReduce op {op.op} not supported on multithreaded pg for now."
)
self.op = op.op
@torch.no_grad()
def work(self, data):
for i in range(len(data[0])):
# use rank0 as the device for sum
rank_0_device = data[0][i].device
# collect all the data into a list and move it
# onto the rank 0 device
tensors = [data[src_rank][i].to(rank_0_device) for src_rank in range(0, len(data))]
# now mimic reduce across all ranks
res = _reduce_ops[self.op](tensors)
# copy all the reduced value to each rank
for src_rank in range(len(data)):
data[src_rank][i].copy_(res.to(data[src_rank][i].device))
class AllGather:
@torch.no_grad()
def work(self, data):
for src_rank in range(len(data)):
in_tensor_list = data[src_rank][1]
# Can't handle all_gather with multiple tensors
assert len(in_tensor_list) == 1
src_tensor = in_tensor_list[0]
for dest in data:
dest_tensor = dest[0][0][src_rank]
dest_tensor.copy_(src_tensor)
class Scatter:
def __init__(self, src):
self.src = src
@torch.no_grad()
def work(self, data):
src_in_tensor_list = data[self.src][1]
# Can't handle scatter with multiple input tensor list
assert len(src_in_tensor_list) == 1
src_in_tensors = src_in_tensor_list[0]
for rank, each_rank_data in enumerate(data):
out_tensor_list = each_rank_data[0]
# Can't handle scatter with multiple output tensor
assert len(out_tensor_list) == 1
dest_tensor = out_tensor_list[0]
dest_tensor.copy_(src_in_tensors[rank])
class Gather:
def __init__(self, dst):
self.dst = dst
@torch.no_grad()
def work(self, data):
# Can't handle gather with multiple tensor lists
assert len(data[self.dst][0]) == 1
out_tensor_list = data[self.dst][0][0]
for rank, each_rank_data in enumerate(data):
src_in_tensor_list = each_rank_data[1]
# Can't handle gather with multiple tensor lists
assert len(src_in_tensor_list) == 1
dest_tensor = out_tensor_list[rank]
dest_tensor.copy_(src_in_tensor_list[0])
class ReduceScatter:
def __init__(self, op):
if op != dist.ReduceOp.SUM and op != dist.ReduceOp.AVG:
raise NotImplementedError(f"ReduceScatter does not support {op}")
self.op = op
@torch.no_grad()
def work(self, data):
start_reduction = [False for _ in range(len(data))]
for each_rank_data in data:
# Can't handle reduce_scatter with multiple scatter list
assert len(each_rank_data[1]) == 1
to_scatter = each_rank_data[1][0]
for i in range(len(to_scatter)):
dest_tensor_on_rank_i = data[i][0]
# Can't handle reduce_scatter with multiple output tensor
assert len(dest_tensor_on_rank_i) == 1
dst_tensor_device = dest_tensor_on_rank_i[0].device
if not start_reduction[i]:
dest_tensor_on_rank_i[0].copy_(to_scatter[i].to(dst_tensor_device))
start_reduction[i] = True
else:
dest_tensor_on_rank_i[0].add_(to_scatter[i].to(dst_tensor_device))
if self.op == dist.ReduceOp.AVG:
num_ranks = len(data)
for each_rank_data in data:
each_rank_data[0][0] /= num_ranks
class Broadcast:
def __init__(self, src):
self.src = src
@torch.no_grad()
def work(self, data):
in_tensor_list = flatten_list(data[self.src])
for i in range(len(data)):
out_tensor_list = flatten_list(data[i])
for j in range(len(in_tensor_list)):
out_tensor_list[j].copy_(in_tensor_list[j])
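# The Collective class below implements a simple rendezvous: every rank deposits its
# data and bumps a counter under _start_cond; rank 0 waits until all ranks have arrived
# (or termination is signalled), runs the collective's work() over the gathered data,
# and then wakes the waiting ranks via _done_cond.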
class Collective:
def __init__(self, world_size, collective, pg):
self._world_size = world_size
self._collective = collective
self._start_cond = threading.Condition()
self._done_cond = threading.Condition()
self._data = [None] * world_size
self._count = 0
self._done = False
self._pg = pg
def join(self, rank, data):
with self._start_cond:
self._data[rank] = data
self._count += 1
# notify rank 0
if self._count == self._world_size:
if rank > 0:
self._start_cond.notify()
if rank == 0:
self._start_cond.wait_for(
lambda: self._count == self._world_size or self._pg._terminate.is_set()
)
# SystemExit is a subclass of BaseException rather than Exception, so it can be
# distinguished from normal exceptions raised by program errors
# and hidden from the exception queue
if self._pg._terminate.is_set():
sys.exit("Test termination event occurs.")
with self._done_cond:
# wait for rank 0 to finish
if rank > 0:
self._done_cond.wait_for(lambda: self._done or self._pg._terminate.is_set())
if self._pg._terminate.is_set():
sys.exit("Test termination event occurs.")
else:
# copy data around
self._collective.work(self._data)
self._done = True
self._done_cond.notify_all()
return ret_work(data)
class ProcessLocalGroup(dist.ProcessGroup):
_coll_lock = threading.Lock()
_cur_coll_on_pgs = {}
_terminate = threading.Event()
@classmethod
def _start_coll(cls, collective, pg):
with cls._coll_lock:
# pg_name is unique, we use that to record the mapping between pg and collective
if pg.pg_name not in cls._cur_coll_on_pgs:
cls._cur_coll_on_pgs[pg.pg_name] = Collective(pg.size(), collective, cls)
return cls._cur_coll_on_pgs[pg.pg_name]
@classmethod
def _end_coll(cls, collective, pg):
# This is racily called by all ranks, so only one will work
with cls._coll_lock:
if pg.pg_name in cls._cur_coll_on_pgs and cls._cur_coll_on_pgs[pg.pg_name] == collective:
cls._cur_coll_on_pgs.pop(pg.pg_name)
@classmethod
def exception_handle(cls, exc):
cls._terminate.set()
for coll in cls._cur_coll_on_pgs.values():
with coll._start_cond:
coll._start_cond.notify()
with coll._done_cond:
coll._done_cond.notify_all()
@classmethod
def reset(cls):
with cls._coll_lock:
cls._cur_coll_on_pgs = {}
cls._terminate.clear()
def alltoall_base(
self,
output_buffer: torch.Tensor,
input_buffer: torch.Tensor,
output_split_sizes: Optional[list[int]],
input_split_sizes: Optional[list[int]],
opts=AllToAllOptions()
) -> torch.Tensor:
coll = ProcessLocalGroup._start_coll(AllToAllBase(), self)
res = coll.join(self._rank, (output_buffer, input_buffer, output_split_sizes, input_split_sizes))
ProcessLocalGroup._end_coll(coll, self)
return res
def alltoall(self, output_tensor_list, input_tensor_list, opts=AllToAllOptions()):
coll = ProcessLocalGroup._start_coll(AllToAll(), self)
res = coll.join(self._rank, (output_tensor_list, input_tensor_list))
ProcessLocalGroup._end_coll(coll, self)
return res
def allreduce(self, tensor_list, opts=AllreduceOptions()):
coll = ProcessLocalGroup._start_coll(AllReduce(opts.reduceOp), self)
res = coll.join(self._rank, tensor_list)
ProcessLocalGroup._end_coll(coll, self)
return res
def allreduce_coalesced(self, tensor_list, opts=AllreduceOptions()):
coll = ProcessLocalGroup._start_coll(AllReduce(opts.reduceOp), self)
res = coll.join(self._rank, tensor_list)
ProcessLocalGroup._end_coll(coll, self)
return res
def barrier(self, opts=BarrierOptions()):
return self.allreduce(tensor_list=[torch.ones(1)])
def allgather(self, output_tensors, input_tensor, opts=AllgatherOptions()):
coll = ProcessLocalGroup._start_coll(AllGather(), self)
res = coll.join(self._rank, (output_tensors, input_tensor))
ProcessLocalGroup._end_coll(coll, self)
return res
def _allgather_base(self, output_tensor, input_tensor, opts=AllgatherOptions()):
tensor_list = list(torch.chunk(output_tensor, self._world_size))
return self.allgather([tensor_list], [input_tensor], opts)
def broadcast(self, tensor_list, opts=BroadcastOptions()):
coll = ProcessLocalGroup._start_coll(Broadcast(opts.rootRank), self)
res = coll.join(self._rank, tensor_list)
ProcessLocalGroup._end_coll(coll, self)
return res
def scatter(self, output_tensors, input_tensors, opts=ScatterOptions()):
coll = ProcessLocalGroup._start_coll(Scatter(opts.rootRank), self)
res = coll.join(self._rank, (output_tensors, input_tensors))
ProcessLocalGroup._end_coll(coll, self)
return res
def gather(self, output_tensors, input_tensors, opts=ScatterOptions()):
coll = ProcessLocalGroup._start_coll(Gather(opts.rootRank), self)
res = coll.join(self._rank, (output_tensors, input_tensors))
ProcessLocalGroup._end_coll(coll, self)
return res
def reduce_scatter(self, output_tensor, scatter_list, opts=ReduceScatterOptions()):
coll = ProcessLocalGroup._start_coll(ReduceScatter(opts.reduceOp), self)
res = coll.join(self._rank, (output_tensor, scatter_list))
ProcessLocalGroup._end_coll(coll, self)
return res
def _reduce_scatter_base(self, output_tensor, input_tensor, opts=ReduceScatterOptions()):
tensor_list = list(torch.chunk(input_tensor, self._world_size))
return self.reduce_scatter([output_tensor], [tensor_list], opts)
def reduce_scatter_tensor_coalesced(self, output_tensors, input_tensors, opts=ReduceScatterOptions()):
works = [
self._reduce_scatter_base(output_tensor, input_tensor, opts)
for output_tensor, input_tensor
in zip(output_tensors, input_tensors)
]
for work in works[:-1]:
work.wait()
return works[-1]
def allgather_into_tensor_coalesced(self, output_tensor_list, input_tensor_list, opts=AllgatherOptions()):
res = None
for o_t, i_t in zip(output_tensor_list, input_tensor_list):
res = self._allgather_base(o_t, i_t)
return res
def __init__(self, rank, world_size):
super().__init__(rank, world_size)
self._rank = rank
self._world_size = world_size
world = dist.distributed_c10d._world
if isinstance(world, ThreadLocalWorld):
world = world._get_world()
self._world = weakref.ref(world)
self._ctx = torch.autograd.set_multithreading_enabled(False)
def size(self):
return self._world_size
@property
def pg_name(self):
"""
return the global registered name of the current pg in the world
"""
return self._world().pg_names[self]
@property
def group_name(self):
return self.pg_name
def getBackendName(self):
return "threaded"
def __repr__(self):
return f"ThreadedPG world_size:{self._world_size} rank:{self._rank}"
def _create_threaded_pg(prefix_store, rank, world_size, timeout):
pg = ProcessLocalGroup(rank, world_size)
# https://github.com/pytorch/pytorch/pull/103033 made the store-based barrier optional.
# When a device mesh involves sub-groups and the store-based barrier is not enabled in
# c10d, different threads may be initializing different groups concurrently, even though
# the threaded pg's actual collectives are assumed to be single threaded, which leads to
# race conditions.
# For example, with a mesh of [[0, 1], [2, 3]], the sub-groups along dim 0 and dim 1
# would be initialized independently in different threads.
# In that case we can no longer rely on class or global variables and instead use a
# store-based barrier to make sure each group is ready before collectives are invoked
# on any of the groups.
# The prefix store is already per group, so we pass an empty name here.
_store_based_barrier(rank, prefix_store, "", world_size, timeout)
return pg
dist.Backend.register_backend("threaded", _create_threaded_pg, devices=["cpu", "cuda"])
@dataclass
class WorldData:
default_pg: dist.ProcessGroup
pg_map: dict[dist.ProcessGroup, tuple[str, Optional[Store]]]
pg_names: dict[dist.ProcessGroup, str]
pg_group_ranks: dict[dist.ProcessGroup, dict[int, int]]
pg_backend_config: dict[dist.ProcessGroup, str]
group_count: int
tags_to_pg: dict[str, list[dist.ProcessGroup]]
pg_to_tag: dict[dist.ProcessGroup, str]
pg_coalesce_state: dict[dist.ProcessGroup, list[Union[_CollOp, P2POp]]]
class ThreadLocalWorld:
_world = threading.local()
def _get_world(self) -> WorldData:
if not hasattr(ThreadLocalWorld._world, "world"):
ThreadLocalWorld._world.world = WorldData(None, {}, {}, {}, {}, 0, {}, {}, {})
return ThreadLocalWorld._world.world
@property
def default_pg(self):
return self._get_world().default_pg
@default_pg.setter
def default_pg(self, value):
self._get_world().default_pg = value
@property
def pg_map(self):
return self._get_world().pg_map
@property
def pg_names(self):
return self._get_world().pg_names
@property
def pg_group_ranks(self):
return self._get_world().pg_group_ranks
@property
def pg_backend_config(self):
return self._get_world().pg_backend_config
@property
def group_count(self) -> int:
return self._get_world().group_count
@group_count.setter
def group_count(self, value):
self._get_world().group_count = value
@property
def tags_to_pg(self):
return self._get_world().tags_to_pg
@property
def pg_to_tag(self):
return self._get_world().pg_to_tag
@property
def pg_coalesce_state(self) -> dict[dist.ProcessGroup, list[Union[_CollOp, P2POp]]]:
return self._get_world().pg_coalesce_state
_old_pg_world = None
_ctx_manager = None
def _install_threaded_pg():
global _old_pg_world
global _ctx_manager
_old_pg_world = dist.distributed_c10d._world
dist.distributed_c10d._world = ThreadLocalWorld()
_ctx_manager = torch.autograd.set_multithreading_enabled(False)
return dist.distributed_c10d._world
def _uninstall_threaded_pg():
dist.distributed_c10d._world = _old_pg_world
```
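For context, here is a minimal sketch of driving this threaded backend directly; in practice PyTorch's internal `MultiThreadedTestCase` helper wraps a setup like this. The names `run_rank` and `WORLD_SIZE` below are illustrative, not part of the module.
```py
import threading

import torch
import torch.distributed as dist

# Importing the module registers the "threaded" backend.
from torch.testing._internal.distributed.multi_threaded_pg import (
    _install_threaded_pg,
    _uninstall_threaded_pg,
)

WORLD_SIZE = 4
_install_threaded_pg()      # swap in the thread-local world
store = dist.HashStore()    # one in-process store shared by all "ranks"

def run_rank(rank):
    dist.init_process_group("threaded", rank=rank, world_size=WORLD_SIZE, store=store)
    t = torch.ones(2) * rank
    dist.all_reduce(t)      # dispatched to ProcessLocalGroup.allreduce
    assert torch.equal(t, torch.full((2,), float(sum(range(WORLD_SIZE)))))
    dist.destroy_process_group()

threads = [threading.Thread(target=run_rank, args=(r,)) for r in range(WORLD_SIZE)]
for th in threads:
    th.start()
for th in threads:
    th.join()
_uninstall_threaded_pg()
```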
==========================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\nn\__init__.py
ENCODING: utf-8
```py
```
==============================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\nn\api\__init__.py
ENCODING: utf-8
```py
```
========================================================================================================================================================
SOURCE CODE FILE: remote_module_test.py
LINES: 3
SIZE: 29.68 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\nn\api\remote_module_test.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import enum
import torch
import torch.distributed.rpc as rpc
import torch.testing._internal.dist_utils as dist_utils
from torch import Tensor, nn
from torch._jit_internal import Future
from torch.distributed.nn import RemoteModule
from torch.distributed.nn.api.remote_module import _REMOTE_MODULE_PICKLED_ATTRIBUTES
from torch.distributed.nn.api.remote_module import _RemoteModule
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import TemporaryFileName, TEST_WITH_ROCM
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
_PARAM_VAL = torch.nn.Parameter(torch.ones(1))
# RPC handler for querying the device on the destination worker.
def remote_device(module_rref):
for param in module_rref.local_value().parameters():
return param.device
# RPC handler for querying __dict__ on the destination worker.
def remote_module_attributes(remote_module):
return remote_module.__dict__
# RPC handler for running forward on the destination worker.
def remote_forward(remote_module, args):
return remote_module.forward(*args)
# RPC handler for running forward_async on the destination worker.
def remote_forward_async(remote_module, args):
# Since a Future cannot be pickled and sent over the RPC layer,
# we have to wait here and behave just like ``forward_sync``.
return remote_module.forward_async(*args).wait()
# RPC handler for getting training mode on the destination worker.
def get_remote_training_arg(module_rref):
return module_rref.local_value().training
class ModuleCreationMode(enum.Enum):
MODULE_CTOR_WITH_INTERFACE = "module_ctor_with_interface"
MODULE_CTOR = "module_ctor"
@torch.jit.interface
class MyModuleInterface:
def forward(
self, tensor: Tensor, number: int, word: str = "default"
) -> tuple[str, int, Tensor]:
# pyre-ignore[7]: Pyre and torch.jit.interface don't mix well
pass
@torch.jit.interface
class RemoteMyModuleInterface:
def forward(
self, tensor: Tensor, number: int, word: str = "default"
) -> tuple[str, int, Tensor]:
# pyre-ignore[7]: Pyre and torch.jit.interface don't mix well
pass
def forward_async(
self, tensor: Tensor, number: int, word: str = "default"
) -> Future[tuple[str, int, Tensor]]:
pass
class MyModule(nn.Module):
def __init__(self, first_arg, first_kwarg=-1):
super().__init__()
self.param1 = _PARAM_VAL
def forward(
self, tensor: Tensor, number: int, word: str = "default"
) -> tuple[str, int, Tensor]:
return word, number, tensor
class BadModule:
def __init__(self, first_arg, first_kwarg=-1):
pass
def create_scripted_module(first_arg, first_kwarg=-1):
module = MyModule(first_arg, first_kwarg=first_kwarg)
scripted_module = torch.jit.script(module)
return scripted_module
# Common utils for both CPU and CUDA test suites
class CommonRemoteModuleTest(RpcAgentTestFixture):
@property
def world_size(self): # Override setting in RpcAgentTestFixture
return 2
@staticmethod
def _create_remote_module_iter(remote_device, modes=None):
if modes is None:
modes = ModuleCreationMode.__members__.values()
args = (1,)
kwargs = dict(first_kwarg=2)
if ModuleCreationMode.MODULE_CTOR in modes:
remote_module = RemoteModule(remote_device, MyModule, args, kwargs)
yield remote_module
if ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE in modes:
remote_module = _RemoteModule(
remote_device,
create_scripted_module,
args,
kwargs,
_module_interface_cls=MyModuleInterface,
)
scripted_remote_module = torch.jit.script(remote_module)
yield scripted_remote_module
class RemoteModuleTest(CommonRemoteModuleTest):
@dist_utils.dist_init
def test_bad_module(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
remote_device = f"{dst_worker_name}/cpu"
args = (1,)
kwargs = dict(first_kwarg=2)
with self.assertRaisesRegex(
ValueError,
r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of <class nn.Module>,",
):
RemoteModule(remote_device, BadModule, args, kwargs).forward()
with self.assertRaisesRegex(
ValueError,
r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of <class nn.Module>,",
):
RemoteModule(remote_device, BadModule, args, kwargs).forward()
@dist_utils.dist_init
def test_forward_async(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
args = (torch.ones(1), 2, "3")
for remote_module in self._create_remote_module_iter(dst_worker_name):
ret_fut = remote_module.forward_async(*args)
ret = ret_fut.wait()
self.assertEqual(ret, tuple(reversed(args)))
@dist_utils.dist_init
def test_forward_async_script(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
scripted_remote_module = next(
self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
)
)
@torch.jit.script
def run_forward_async(scripted_remote_module: RemoteMyModuleInterface):
ret_fut = scripted_remote_module.forward_async(torch.ones(1), 2, "3")
ret = ret_fut.wait()
return ret
ret = run_forward_async(scripted_remote_module)
self.assertEqual(ret, ("3", 2, torch.ones(1)))
@dist_utils.dist_init
def test_forward_sync(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
args = (torch.ones(1), 2, "3")
for remote_module in self._create_remote_module_iter(dst_worker_name):
ret = remote_module.forward(*args)
self.assertEqual(ret, tuple(reversed(args)))
@dist_utils.dist_init
def test_forward_sync_script(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
scripted_remote_module = next(
self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
)
)
@torch.jit.script
def run_forward(scripted_remote_module: MyModuleInterface):
ret = scripted_remote_module.forward(torch.ones(1), 2, "3")
return ret
ret = run_forward(scripted_remote_module)
self.assertEqual(ret, ("3", 2, torch.ones(1)))
@dist_utils.dist_init
def test_forward_with_kwargs(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
args = (torch.ones(1), 2)
kwargs = dict(word="3")
# Only test Python nn.Module, because script module methods don't support taking kwargs.
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
ret_fut = remote_module.forward_async(*args, **kwargs)
ret = ret_fut.wait()
self.assertEqual(ret, tuple(reversed(args + ("3",))))
ret = remote_module.forward(*args, **kwargs)
self.assertEqual(ret, tuple(reversed(args + ("3",))))
@dist_utils.dist_init
def test_remote_parameters(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
# Only test Python nn.Module, because script module methods don't support ``remote_parameters``.
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
param_rrefs = remote_module.remote_parameters()
self.assertEqual(len(param_rrefs), 1)
self.assertTrue(torch.equal(param_rrefs[0].to_here(), _PARAM_VAL))
@dist_utils.dist_init
def test_get_module_rref(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
# Only test Python nn.Module, because script module methods don't support ``get_module_rref``.
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
rref = remote_module.get_module_rref()
self.assertEqual(rref, remote_module.module_rref)
for param in rref.to_here().parameters():
self.assertTrue(torch.equal(param, _PARAM_VAL))
@dist_utils.dist_init
def test_train_eval(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
remote_module.train()
ret1 = rpc.rpc_sync(dst_worker_name, get_remote_training_arg, args=(remote_module.get_module_rref(),))
self.assertEqual(ret1, True)
remote_module.eval()
ret2 = rpc.rpc_sync(dst_worker_name, get_remote_training_arg, args=(remote_module.get_module_rref(),))
self.assertEqual(ret2, False)
@dist_utils.dist_init
def test_unsupported_methods(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
with self.assertRaisesRegex(
ValueError, r"Method ``register_buffer`` not supported for RemoteModule"
):
remote_module.register_buffer("buffer", torch.ones(5))
with self.assertRaisesRegex(
ValueError,
r"Method ``register_parameter`` not supported for RemoteModule",
):
remote_module.register_parameter(
"param", torch.nn.Parameter(torch.ones(1))
)
with self.assertRaisesRegex(
ValueError, r"Method ``add_module`` not supported for RemoteModule"
):
remote_module.add_module("empty", None)
with self.assertRaisesRegex(
ValueError, r"Method ``apply`` not supported for RemoteModule"
):
fn = torch.rand((3, 3), requires_grad=False)
remote_module.apply(fn)
with self.assertRaisesRegex(
ValueError, r"Method ``cuda`` not supported for RemoteModule"
):
remote_module.cuda()
with self.assertRaisesRegex(
ValueError, r"Method ``cpu`` not supported for RemoteModule"
):
remote_module.cpu()
with self.assertRaisesRegex(
ValueError, r"Method ``type`` not supported for RemoteModule"
):
remote_module.type(torch.FloatTensor)
with self.assertRaisesRegex(
ValueError, r"Method ``float`` not supported for RemoteModule"
):
remote_module.float()
with self.assertRaisesRegex(
ValueError, r"Method ``double`` not supported for RemoteModule"
):
remote_module.double()
with self.assertRaisesRegex(
ValueError, r"Method ``bfloat16`` not supported for RemoteModule"
):
remote_module.bfloat16()
with self.assertRaisesRegex(
ValueError, r"Method ``to`` not supported for RemoteModule"
):
remote_module.to("cpu", dtype=torch.int32)
def hook(module, grad_input, grad_output):
pass
with self.assertRaisesRegex(
ValueError,
r"Method ``register_backward_hook`` not supported for RemoteModule",
):
remote_module.register_backward_hook(hook)
with self.assertRaisesRegex(
ValueError,
r"Method ``register_forward_pre_hook`` not supported for RemoteModule",
):
remote_module.register_forward_pre_hook(hook)
with self.assertRaisesRegex(
ValueError,
r"Method ``register_forward_hook`` not supported for RemoteModule",
):
remote_module.register_forward_hook(hook)
with self.assertRaisesRegex(
ValueError, r"Method ``state_dict`` not supported for RemoteModule"
):
remote_module.state_dict()
with self.assertRaisesRegex(
ValueError, r"Method ``load_state_dict`` not supported for RemoteModule"
):
remote_module.load_state_dict({})
with self.assertRaisesRegex(
ValueError,
r"Method ``parameters`` not supported for RemoteModule. Please use ``remote_parameters`` instead.",
):
remote_module.parameters()
with self.assertRaisesRegex(
ValueError,
r"Method ``named_parameters`` not supported for RemoteModule",
):
remote_module.named_parameters()
with self.assertRaisesRegex(
ValueError, r"Method ``buffers`` not supported for RemoteModule"
):
remote_module.buffers()
with self.assertRaisesRegex(
ValueError, r"Method ``named_buffers`` not supported for RemoteModule"
):
remote_module.named_buffers()
with self.assertRaisesRegex(
ValueError, r"Method ``children`` not supported for RemoteModule"
):
remote_module.children()
with self.assertRaisesRegex(
ValueError, r"Method ``named_children`` not supported for RemoteModule"
):
remote_module.named_children()
with self.assertRaisesRegex(
ValueError, r"Method ``modules`` not supported for RemoteModule"
):
remote_module.modules()
with self.assertRaisesRegex(
ValueError, r"Method ``named_modules`` not supported for RemoteModule"
):
remote_module.named_modules()
with self.assertRaisesRegex(
ValueError, r"Method ``requires_grad_`` not supported for RemoteModule"
):
remote_module.requires_grad_()
with self.assertRaisesRegex(
ValueError, r"Method ``zero_grad`` not supported for RemoteModule"
):
remote_module.zero_grad()
with self.assertRaisesRegex(
ValueError, r"Method ``share_memory`` not supported for RemoteModule"
):
remote_module.share_memory()
with self.assertRaisesRegex(
ValueError, r"Method ``extra_repr`` not supported for RemoteModule"
):
remote_module.extra_repr()
@dist_utils.dist_init
def test_send_remote_module_with_a_new_attribute_not_pickled_over_the_wire(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
# If a new attribute is added to this RemoteModule after initialization
# and the module is then sent over the wire by RPC,
# the new field will not be pickled, because it is not listed in _REMOTE_MODULE_PICKLED_ATTRIBUTES.
# Note that adding a new attribute outside the constructor should rarely happen.
# If a new attribute is added in the RemoteModule constructor,
# a sanity check enforces that developers add it to either
# _REMOTE_MODULE_PICKLED_ATTRIBUTES or _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING.
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
new_attr_name = "new_attr"
setattr(remote_module, new_attr_name, 1)
attrs = rpc.rpc_sync(
dst_worker_name, remote_module_attributes, (remote_module,)
)
self.assertNotIn(new_attr_name, attrs)
@dist_utils.dist_init
def test_remote_module_py_pickle_not_supported(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
with TemporaryFileName() as fname:
with self.assertRaisesRegex(
RuntimeError,
"Cannot pickle RemoteModule in python pickler. RemoteModule can only be pickled when using RPC",
):
torch.save(remote_module, fname)
@dist_utils.dist_init
def test_remote_module_py_pickle_not_supported_script(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
):
with TemporaryFileName() as fname:
with self.assertRaisesRegex(torch.jit.Error, "can only be pickled when using RPC"):
torch.save(remote_module, fname)
class ThreeWorkersRemoteModuleTest(CommonRemoteModuleTest):
@property
def world_size(self): # Override setting in CommonRemoteModuleTest
return 3
@dist_utils.dist_init
def test_send_remote_module_over_the_wire(self):
if self.rank != 0:
return
dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size)
# Unpickled attributes include both the inherent attributes of RemoteModule
# (not inherited from the superclass) and two installed methods.
expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES)
expected_unpickled_attrs.append("forward_async")
expected_unpickled_attrs.append("forward")
# Create a remote module on worker1 and then pass it to worker2 over the RPC layer.
for remote_module in self._create_remote_module_iter(
dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
# Test querying some simple attributes from worker2.
attrs = rpc.rpc_sync(
dst_worker2_name, remote_module_attributes, (remote_module,)
)
self.assertListEqual(list(attrs.keys()), expected_unpickled_attrs)
self.assertEqual(attrs["on"], "worker1")
self.assertEqual(attrs["device"], "cpu")
self.assertFalse(attrs["is_device_map_set"])
self.assertFalse(attrs["is_scriptable"])
# Test that the installed methods on worker1 can be invoked by worker2 over the RPC layer.
# NOTE: In practice a remote module should be stored directly on the worker that runs ``forward`` or ``forward_async``,
# rather than having another worker initiate the forward over the RPC layer.
args = (torch.ones(1), 2, "3")
ret1 = rpc.rpc_sync(dst_worker2_name, remote_forward, (remote_module, args))
self.assertEqual(ret1, tuple(reversed(args)))
ret2 = rpc.rpc_sync(
dst_worker2_name, remote_forward_async, (remote_module, args)
)
self.assertEqual(ret2, tuple(reversed(args)))
@dist_utils.dist_init
def test_send_remote_module_over_the_wire_script_not_supported(self):
if self.rank != 0:
return
dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size)
# Unpickled attributes include both the inherent attributes of RemoteModule
# (not inherited from the superclass) and two installed methods.
expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES)
expected_unpickled_attrs.append("forward_async")
expected_unpickled_attrs.append("forward")
with self.assertRaisesRegex(
RuntimeError, "Passing a script RemoteModule over RPC is not supported."
):
# Create a remote module on worker1 and then pass it to worker2 over the RPC layer.
for remote_module in self._create_remote_module_iter(
dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
):
# Test querying some simple attributes from worker2.
rpc.rpc_sync(
dst_worker2_name, remote_module_attributes, (remote_module,)
)
@dist_utils.dist_init
def test_create_remote_module_from_module_rref(self):
if self.rank != 0:
return
dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size)
# Create a remote module on worker1 and then pass its `module_rref` to worker2 over the RPC layer.
for remote_module in self._create_remote_module_iter(
dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
remote_module2 = rpc.rpc_sync(
dst_worker2_name,
RemoteModule.init_from_module_rref,
(dst_worker2_name, remote_module.get_module_rref()),
)
args = (torch.ones(1), 2, "3")
ret1 = rpc.rpc_sync(
dst_worker1_name, remote_forward, (remote_module, args)
)
ret2 = rpc.rpc_sync(
dst_worker2_name, remote_forward, (remote_module2, args)
)
self.assertEqual(ret1, ret2)
class CudaRemoteModuleTest(CommonRemoteModuleTest):
@skip_if_lt_x_gpu(1)
@dist_utils.dist_init
def test_valid_device(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = dist_utils.worker_name(dst_rank)
for remote_module in self._create_remote_module_iter(
f"{dst_worker_name}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR]
):
device = rpc.rpc_sync(
dst_worker_name, remote_device, (remote_module.module_rref,)
)
self.assertEqual(device.type, "cuda")
self.assertEqual(device.index, 0)
# Test rank works as well.
for remote_module in self._create_remote_module_iter(
f"rank:{dst_rank}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR]
):
device = rpc.rpc_sync(
dst_worker_name, remote_device, (remote_module.module_rref,)
)
self.assertEqual(device.type, "cuda")
self.assertEqual(device.index, 0)
@skip_if_lt_x_gpu(1)
@dist_utils.dist_init
def test_invalid_devices(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError,
r"Expected one of .+ device type at start of device string",
):
[
m.forward()
for m in self._create_remote_module_iter(
f"{dst_worker_name}/foo",
modes=[ModuleCreationMode.MODULE_CTOR],
)
]
if TEST_WITH_ROCM:
errorString = (r"HIP error: invalid device ordinal\n"
r"HIP kernel errors might be asynchronously reported at some other API call, "
r"so the stacktrace below might be incorrect.\n"
r"For debugging consider passing AMD_SERIALIZE_KERNEL=3")
else:
errorString = r"CUDA error: invalid device ordinal"
with self.assertRaisesRegex(
RuntimeError, errorString
):
[
m.forward()
for m in self._create_remote_module_iter(
f"{dst_worker_name}/cuda:100",
modes=[ModuleCreationMode.MODULE_CTOR],
)
]
with self.assertRaisesRegex(RuntimeError, r"Invalid device string: 'cpu2'"):
[
m.forward()
for m in self._create_remote_module_iter(
f"{dst_worker_name}/cpu2",
modes=[ModuleCreationMode.MODULE_CTOR],
)
]
with self.assertRaisesRegex(RuntimeError, r"Device string must not be empty"):
[
m.forward()
for m in self._create_remote_module_iter(
f"{dst_worker_name}/",
modes=[ModuleCreationMode.MODULE_CTOR],
)
]
with self.assertRaisesRegex(
ValueError,
r"Could not parse remote_device: worker1/cuda:0/cuda:1. The valid format is '<workername>/<device>'",
):
[
m.forward()
for m in self._create_remote_module_iter(
f"{dst_worker_name}/cuda:0/cuda:1",
modes=[ModuleCreationMode.MODULE_CTOR],
)
]
with self.assertRaisesRegex(
ValueError,
r"Could not parse remote_device: /. The valid format is '<workername>/<device>'",
):
[
m.forward()
for m in self._create_remote_module_iter(
"/",
modes=[ModuleCreationMode.MODULE_CTOR],
)
]
with self.assertRaisesRegex(
ValueError,
r"Could not parse remote_device: /cuda:0. The valid format is '<workername>/<device>'",
):
[
m.forward()
for m in self._create_remote_module_iter(
"/cuda:0",
modes=[ModuleCreationMode.MODULE_CTOR],
)
]
@skip_if_lt_x_gpu(1)
@dist_utils.dist_init
def test_input_moved_to_cuda_device(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
# These two CPU tensors (in args and kwargs) should be implicitly moved to an appropriate cuda device.
t1 = torch.ones(1)
args = (t1, 2)
t2 = t1 * 2
kwargs = dict(word=t2)
# Only test Python nn.Module, because script module methods don't support taking kwargs.
for remote_module in self._create_remote_module_iter(
f"{dst_worker_name}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR]
):
ret_fut = remote_module.forward_async(*args, **kwargs)
ret = ret_fut.wait()
self.assertEqual(ret, tuple(reversed(args + (t2,))))
# TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
self.assertEqual(ret[0].device.type, "cpu")
self.assertEqual(ret[2].device.type, "cpu")
ret = remote_module.forward(*args, **kwargs)
self.assertEqual(ret, tuple(reversed(args + (t2,))))
# TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
self.assertEqual(ret[0].device.type, "cpu")
self.assertEqual(ret[2].device.type, "cpu")
@skip_if_lt_x_gpu(1)
@dist_utils.dist_init
def test_input_moved_to_cuda_device_script(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
scripted_remote_module = next(
self._create_remote_module_iter(
f"{dst_worker_name}/cuda:0",
modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE],
)
)
@torch.jit.script
def run_forward(scripted_remote_module: MyModuleInterface):
ret = scripted_remote_module.forward(torch.ones(1), 2, "3")
return ret
ret = run_forward(scripted_remote_module)
self.assertEqual(ret, ("3", 2, torch.ones(1)))
# TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
self.assertEqual(ret[2].device.type, "cpu")
```
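For orientation, a brief sketch of the RemoteModule API these tests exercise, assuming a two-worker RPC setup has already been initialized via `rpc.init_rpc`; `torch.nn.Linear` stands in for an arbitrary module.
```py
import torch
import torch.distributed.rpc as rpc
from torch.distributed.nn import RemoteModule

# On rank 0, assuming rpc.init_rpc("worker0", rank=0, world_size=2) has completed
# and rank 1 has called rpc.init_rpc("worker1", rank=1, world_size=2):
remote_linear = RemoteModule("worker1/cpu", torch.nn.Linear, args=(3, 1))

out = remote_linear.forward(torch.randn(2, 3))        # synchronous remote forward
fut = remote_linear.forward_async(torch.randn(2, 3))  # returns a Future
print(out.shape, fut.wait().shape)                    # torch.Size([2, 1]) twice

param_rrefs = remote_linear.remote_parameters()       # RRefs to weight/bias on worker1
print([p.to_here().shape for p in param_rrefs])

rpc.shutdown()
```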
===========================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\rpc\__init__.py
ENCODING: utf-8
```py
```
=====================================================================================================================================================
SOURCE CODE FILE: dist_autograd_test.py
LINES: 1
SIZE: 107.61 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\rpc\dist_autograd_test.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import sys
import threading
import time
from enum import Enum
import random
import torch
import torch.nn as nn
from datetime import timedelta
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.testing._internal.dist_utils
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.distributed.rpc import RRef
from torch.testing._internal.common_utils import IS_MACOS, skip_but_pass_in_sandcastle_if
from torch.testing._internal.dist_utils import (
dist_init,
initialize_pg,
wait_until_node_failure,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
# Right now we test up to 3-layer nested rpc calls.
# rpc_done[1] and ctx_ids[1] record that the rpc from the previous rank is done and
# the context id sent from the previous rank, respectively.
# rpc_done[2] and ctx_ids[2] do the same for the rank two hops back.
# rpc_done[3] and ctx_ids[3] do the same for the rank three hops back.
# rpc_done[0] and ctx_ids[0] correspond to the current rank, but are mostly unused.
rpc_done = [False, False, False, False]
ctx_ids = [-1, -1, -1, -1]
known_context_ids = set()
requires_grad_tensor = torch.ones(3, 3, requires_grad=True)
# Send rpc done info and context_id to
# dst_rank = (self.rank + rank_distance) % self.world_size
# we don't need a lock here since the GIL is held while executing remote
# python UDFs, so access is serialized across several workers.
def _set_rpc_done(ctx_id, rank_distance):
global rpc_done
global ctx_ids
global known_context_ids
rpc_done[rank_distance] = True
ctx_ids[rank_distance] = ctx_id
known_context_ids.add(ctx_id)
def _check_rpc_done(rank_distance):
while not rpc_done[rank_distance]:
time.sleep(0.1)
def _torch_ones(sizes, requires_grad=False):
return torch.ones(sizes, requires_grad=requires_grad)
# This method must be called on the rref owner; it verifies that the grad of the
# rref tensor equals the given grad.
def _compare_owner_value(context_id, rref, grad):
grads = dist_autograd.get_gradients(context_id)
x = grads[rref.local_value()]
if x.is_sparse:
assert grad.is_sparse
x = x.to_dense()
grad = grad.to_dense()
else:
assert not grad.is_sparse
return torch.equal(x, grad)
def create_tensor():
return torch.ones((3, 3), requires_grad=True)
def build_sparse_tensor(coalesce=False, requires_grad=True, dtype=torch.float32):
i = [[0, 1, 1], [2, 0, 2]]
v = [3.2, 4.1, 5.3]
tensor = torch.sparse_coo_tensor(
i, v, (3, 3), requires_grad=requires_grad, dtype=dtype
)
if coalesce:
tensor = tensor.coalesce()
return tensor
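# build_sparse_tensor() above yields a 3x3 sparse COO tensor with entries
# (0, 2) = 3.2, (1, 0) = 4.1 and (1, 2) = 5.3; requires_grad defaults to True so it can
# participate in the dist autograd graphs exercised below.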
@torch.jit.script
def create_torchscript_tensor() -> torch.Tensor:
return torch.ones((3, 3)).requires_grad_()
def my_py_add(t1, t2):
return torch.add(t1, t2)
def my_scalar_add(a, b):
return a + b
def my_rref_add(rref_t1, t2):
ret = torch.add(rref_t1.local_value(), t2)
return ret
@torch.jit.script
def my_script_add(t1, t2):
return torch.add(t1, t2)
@torch.jit.script
def my_script_ref_add(ref_t1: RRef[torch.Tensor], t2: torch.Tensor) -> torch.Tensor:
t1 = ref_t1.to_here()
return torch.add(t1, t2)
def my_nested_rref_add(dst, rref_t1, t2):
return rpc.rpc_sync(dst, my_rref_add, args=(rref_t1, t2))
def ret_requires_grad():
return requires_grad_tensor
def my_py_nested_call(t1, t2, dst, world_size, hops):
next_dst = (dst + 1) % world_size
if hops > 0:
return rpc.rpc_sync(
worker_name(next_dst),
my_py_nested_call,
args=(t1, t2, next_dst, world_size, hops - 1),
)
else:
return rpc.rpc_sync(worker_name(next_dst), my_py_add, args=(t1, t2))
# After a dist autograd context is cleaned up, it should also be cleaned up on other
# nodes. This helper allows timeout_seconds for those RPCs to complete, and
# checks that all the contexts have been cleaned up within that timeframe.
def _all_contexts_cleaned_up(timeout_seconds=10):
global known_context_ids
start = time.time()
context_id_to_raised = set()
while (
time.time() - start < timeout_seconds
and context_id_to_raised != known_context_ids
):
for context_id in known_context_ids:
try:
dist_autograd._retrieve_context(context_id)
except RuntimeError:
context_id_to_raised.add(context_id)
# all contexts have been cleaned up if trying to retrieve any context resulted in a RuntimeError.
success = context_id_to_raised == known_context_ids
return success
# This function creates a dist autograd context, runs rpc_sync on the given ps,
# and then blocks until the ps has verified that the grads are correctly accumulated.
def _run_trainer(rref_t1, t2, ps, rank_diff, sparse):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(ps, my_rref_add, args=(rref_t1, t2))
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
dist_autograd.backward(context_id, [loss])
# prevent deleting dist autograd context
rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
rpc.rpc_sync(ps, _check_rpc_done, args=(0,))
# This function is the same as _run_trainer, except rpc calls torchscript
# function "my_script_ref_add" instead of python function "my_rref_add"
def _run_trainer_torchscript(rref_t1, t2, ps, rank_diff, sparse):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(ps, my_script_ref_add, args=(rref_t1, t2))
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
dist_autograd.backward(context_id, [loss])
# prevent deleting dist autograd context
rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
rpc.rpc_sync(ps, _check_rpc_done, args=(0,))
class SimulateBackwardError(Function):
_simulate_error = True
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if SimulateBackwardError._simulate_error:
raise Exception("Simulate error on backward pass") # noqa: TRY002
else:
return input
class ExecMode(Enum):
LOCAL = 1 # Run the operation locally.
RPC_SYNC = 2 # Run the operation using rpc_sync
REMOTE = 3 # Run the operation using remote.
RPC_ASYNC = 4 # Run the operation using rpc_async
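# Most tests below follow the same dist autograd pattern: create a context, run the
# forward computation over RPC inside it, call dist_autograd.backward() on a scalar
# loss, and read the accumulated grads with dist_autograd.get_gradients(), e.g.:
#   with dist_autograd.context() as context_id:
#       loss = rpc.rpc_sync(worker_name(dst), torch.add, args=(t1, t2)).sum()
#       dist_autograd.backward(context_id, [loss])
#       grads = dist_autograd.get_gradients(context_id)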
# Common utils for both CPU and CUDA test suites
class CommonDistAutogradTest(RpcAgentTestFixture):
def _exec_func_with_dst(self, dst, exec_mode, method, *args):
if ExecMode.LOCAL == exec_mode:
if len(args) == 1 and isinstance(args[0], list):
return method(*args[0])
return method(*args)
elif ExecMode.RPC_SYNC == exec_mode:
return rpc.rpc_sync(worker_name(dst), method, args=(args))
elif ExecMode.REMOTE == exec_mode:
return rpc.remote(worker_name(dst), method, args=(args)).to_here()
elif ExecMode.RPC_ASYNC == exec_mode:
fut = rpc.rpc_async(worker_name(dst), method, args=(args))
return fut.wait()
else:
raise ValueError(f"Unrecognized ExecMode {exec_mode}")
def _exec_func(self, exec_mode, method, *args):
return self._exec_func_with_dst(
self._next_rank(), exec_mode, method, *args
)
def _next_rank(self):
if hasattr(self, "dst_rank"):
self.dst_rank = (self.dst_rank + 1) % self.world_size
if self.dst_rank == self.rank:
return self._next_rank()
else:
self.dst_rank = (self.rank + 1) % self.world_size
return self.dst_rank
def _check_rpc_done(self, rank_distance):
_check_rpc_done(rank_distance)
def _verify_backwards(self, exec_mode, tensors, context_id, local_grads, *args):
if exec_mode == ExecMode.LOCAL:
torch.autograd.backward(tensors)
return [arg.grad for arg in args]
else:
self._verify_backwards_remote(tensors, context_id, local_grads, *args)
def _verify_backwards_remote(self, tensors, context_id, local_grads, *args):
dist_autograd.backward(context_id, tensors)
# Verify grads were accumulated appropriately.
grads = dist_autograd.get_gradients(context_id)
nargs = len(args)
ngrads = 0
for i in range(0, nargs):
if local_grads[i] is not None:
self.assertIn(args[i], grads)
self.assertEqual(local_grads[i], grads[args[i]])
ngrads += 1
else:
self.assertNotIn(args[i], grads)
self.assertEqual(ngrads, len(grads))
def _test_graph(self, fn, exec_mode, sparse):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
if sparse:
t1 = build_sparse_tensor()
t2 = build_sparse_tensor()
else:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), fn, args=(t1, t2))
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), fn, args=(t1, t2)
).to_here()
else:
raise ValueError(f"Unrecognized ExecMode {exec_mode}")
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# Verify graph for current context id.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(1, len(recv_functions))
self._verify_graph_for_first_rpc_call(
next(iter(send_functions.values())),
next(iter(recv_functions.values())),
t1,
t2,
ret,
)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
# Verify graph for previous context id.
ctx = dist_autograd._retrieve_context(ctx_ids[1])
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
self._verify_graph_for_rpc_call_exec(next(iter(send_functions.values())))
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
# autograd context should be cleaned up by now.
with self.assertRaises(RuntimeError):
ctx = dist_autograd._retrieve_context(context_id)
# No autograd context available.
with self.assertRaises(RuntimeError):
ctx = dist_autograd._current_context()
# 3-layer nested calls
def _test_graph_for_py_nested_call(self, exec_mode, sparse):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
if sparse:
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
else:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_py_nested_call,
args=(t1, t2, dst_rank, self.world_size, 1),
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank),
my_py_nested_call,
args=(t1, t2, dst_rank, self.world_size, 1),
).to_here()
else:
raise ValueError(f"Unrecognized ExecMode {exec_mode}")
# Barrier to ensure all RPCs are done.
dist.barrier()
for rd in [1, 2, 3]:
rpc.rpc_sync(
worker_name((self.rank + rd) % self.world_size),
_set_rpc_done,
args=(context_id, rd),
)
# Barrier to ensure all set_rpc_done have completed.
dist.barrier()
# For self.rank, there are 4 graphs to verify:
# the first is for the current context id, where this rank sends the first rpc call;
# the second is for the prev context id, where this rank makes the 1st nested call;
# the third is for the prev-prev context id, where this rank makes the 2nd nested call;
# the last is for the prev-prev-prev context id, where this rank executes the
# torch.add() operator.
# Verify first graph for current context id.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(1, len(recv_functions))
self._verify_graph_for_first_rpc_call(
next(iter(send_functions.values())),
next(iter(recv_functions.values())),
t1,
t2,
ret,
)
# Verify second graph for 1st nested call.
ctx = dist_autograd._retrieve_context(ctx_ids[1])
self._verify_graph_for_nested_rpc_call(ctx)
# Verify third graph for 2nd nested call.
ctx = dist_autograd._retrieve_context(ctx_ids[2])
self._verify_graph_for_nested_rpc_call(ctx)
# verify last graph for rpc call execution.
ctx = dist_autograd._retrieve_context(ctx_ids[3])
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
self._verify_graph_for_rpc_call_exec(next(iter(send_functions.values())))
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
# Rank0->Rank1->Rank0
def _test_graph_for_py_nested_call_itself(self, exec_mode, sparse):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
if sparse:
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
else:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_py_nested_call,
args=(
t1,
t2,
(self.rank - 1 + self.world_size) % self.world_size,
self.world_size,
0,
),
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank),
my_py_nested_call,
args=(
t1,
t2,
(self.rank - 1 + self.world_size) % self.world_size,
self.world_size,
0,
),
).to_here()
else:
raise ValueError(f"Unrecognized ExecMode {exec_mode}")
rpc.rpc_sync(
worker_name((self.rank + 1) % self.world_size),
_set_rpc_done,
args=(context_id, 1),
)
# For self.rank, there are 2 graphs to verify:
# one for the current context id, where this rank sends the first rpc call
# and executes the torch.add() operator,
# and another for the prev context id, where this rank makes the nested call.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(2, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(2, len(recv_functions))
self._verify_graph_for_first_rpc_call(
next(iter(send_functions.values())),
list(recv_functions.values())[1],
t1,
t2,
ret,
)
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[1])
# Verify two pairs of send and recv functions for nested
# call
self._check_rpc_done(1)
ctx = dist_autograd._retrieve_context(ctx_ids[1])
self._verify_graph_for_nested_rpc_call(ctx)
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
def _test_no_graph_with_tensors_not_require_grad(self, exec_mode, sparse):
initialize_pg(self.file_init_method, self.rank, self.world_size)
dst_rank = (self.rank + 1) % self.world_size
with dist_autograd.context() as context_id:
if sparse:
t1 = build_sparse_tensor(requires_grad=False)
t2 = build_sparse_tensor(requires_grad=False)
else:
t1 = torch.ones(3, 3, requires_grad=False)
t2 = torch.zeros(3, 3, requires_grad=False)
if ExecMode.RPC_SYNC == exec_mode:
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(t1, t2)
)
elif ExecMode.REMOTE == exec_mode:
rpc.remote(
worker_name(dst_rank), torch.add, args=(t1, t2)
).to_here()
else:
raise ValueError(f"Unrecognized ExecMode {exec_mode}")
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
ctx = dist_autograd._current_context()
send_functions = ctx._send_functions()
self.assertEqual(len(send_functions), 0)
recv_functions = ctx._recv_functions()
self.assertEqual(len(recv_functions), 0)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
# NB: RRef.to_here() always passes the autograd context to the
# callee, as the caller does not know whether the return
# value will contain a requires_grad tensor or not.
#
# rpc/remote with a udf (_set_rpc_done here) also always passes the
# autograd context to the callee for the same reason.
self.assertNotEqual(-1, dist_autograd._retrieve_context(ctx_ids[1]))
dist.barrier()
def _test_rpc_complex_args(self, exec_mode, sparse):
with dist_autograd.context():
num_tensors = 10
tensors = []
for i in range(num_tensors):
if sparse:
tensor = build_sparse_tensor(requires_grad=(i % 2 == 0))
else:
tensor = torch.ones(3, 3, requires_grad=(i % 2 == 0))
tensors.append(tensor)
dst_rank = self._next_rank()
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.stack, args=(tensors,)
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), torch.stack, args=(tensors,)
).to_here()
else:
raise ValueError(f"Unrecognized ExecMode {exec_mode}")
self.assertEqual(torch.stack(tensors), ret)
# Verify the appropriate tensors have been attached to the autograd graph.
next_funcs = next(iter(dist_autograd._current_context()._send_functions().values())).next_functions
for i in range(len(next_funcs)):
self.assertEqual(
"torch::autograd::AccumulateGrad", next_funcs[i][0].name()
)
self.assertEqual(tensors[i], next_funcs[i][0].variable)
# Verify that the worker id has been recorded in the context
ctx = dist_autograd._current_context()
worker_ids = ctx._known_worker_ids()
self.assertEqual(len(worker_ids), 1)
self.assertEqual(worker_ids, {dst_rank})
def context_cleanup_test_helper(self, rpc_args, func, nested=False):
initialize_pg(self.file_init_method, self.rank, self.world_size)
        # test that in dist autograd, in the case that tensors communicated over RPC do
        # NOT require grad, we still clean up the dist autograd contexts created
        # on other nodes. This is because the autograd context is still
        # communicated over RPC even if tensor arguments do not require grad, as
        # it is possible that the response could require grad.
if nested:
dst_rank = (self.rank + 1) % self.world_size
nested_dst_rank = (dst_rank + 1) % self.world_size
dst_ranks = {dst_rank}
else:
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
if nested:
rpc.rpc_sync(
worker_name(nested_dst_rank),
_set_rpc_done,
args=(context_id, 2),
)
# the thread's context id should be cleaned up
with self.assertRaises(RuntimeError):
dist_autograd._retrieve_context(context_id)
# Ensure all peers have finished mutating the
# `known_context_ids` set.
dist.barrier()
# check that all contexts have been cleaned up.
success = _all_contexts_cleaned_up()
self.assertTrue(success)
def _backward_no_grad_on_tensor(self, t1, t2, sparse):
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
torch.add,
args=(t1, t2))
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
self.assertIsNone(t1.grad)
self.assertIsNone(t2.grad)
# Now populate .grad with local autograd engine and
# verify dist autograd doesn't mess with it.
loss_local = torch.add(t1, t2)
if sparse:
loss_local = torch.sparse.sum(loss_local)
else:
loss_local = loss_local.sum()
loss_local.backward()
self.assertIsNotNone(t1.grad)
self.assertIsNotNone(t2.grad)
t1_grad_before = t1.grad
t2_grad_before = t2.grad
dist_autograd.backward(context_id, [loss])
self.assertEqual(t1_grad_before, t1.grad)
self.assertEqual(t2_grad_before, t2.grad)
    # The current rank first creates a tensor on the rref_owner, and then passes
    # the rref with another tensor to the callee to run either my_rref_add or
    # my_nested_rref_add, depending on whether the callee is the rref owner.
    # The grad of the local tensor lives on the current rank, and the grad of
    # the rref tensor lives on the rref owner.
def _backward_rref(self, callee, rref_owner, t1, t2, local_grads, sparse):
local_ret = torch.add(t1, t2)
if sparse:
local_ret = torch.sparse.sum(local_ret)
else:
local_ret = local_ret.sum()
local_ret.backward()
with dist_autograd.context() as context_id:
if sparse:
rref_t1 = rpc.remote(
rref_owner, build_sparse_tensor, args=(False, True,)
)
else:
rref_t1 = rpc.remote(
rref_owner, _torch_ones, args=((3, 3),), kwargs={"requires_grad": True}
)
if callee == rref_owner:
rref = rpc.remote(callee, my_rref_add, args=(rref_t1, t2))
else:
rref = rpc.remote(
callee, my_nested_rref_add, args=(rref_owner, rref_t1, t2)
)
ret = rref.to_here()
if sparse:
ret = torch.sparse.sum(ret)
else:
ret = ret.sum()
dist_autograd.backward(context_id, [ret])
# verify grads on caller
grads = dist_autograd.get_gradients(context_id)
self.assertIn(t2, grads)
self.assertEqual(grads[t2], t2.grad)
# verify grads on rref owner
self.assertTrue(
rpc.rpc_sync(
rref_owner,
_compare_owner_value,
args=(context_id, rref_t1, t1.grad),
)
)
    # In this test, every rank serves as a parameter server (ps) and a
    # driver, and kicks off trainers on the other three ranks. So, we have:
    # ps = rank0 with trainers = rank1/2/3
    # ps = rank1 with trainers = rank2/3/0
    # ps = rank2 with trainers = rank3/0/1
    # ps = rank3 with trainers = rank0/1/2
    #
    # These four ps-trainer groups run on completely separate autograd
    # graphs, but they share the same set of underlying RpcAgents.
def _test_trainer_ps(self, create_ref_fn, trainer_fn, sparse):
if sparse:
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
else:
t1 = torch.ones((3, 3), requires_grad=True)
t2 = torch.zeros((3, 3), requires_grad=True)
local_ret = torch.add(t1, t2)
if sparse:
torch.sparse.sum(local_ret).backward()
else:
local_ret.sum().backward()
# create rref on self
rref_t1 = rpc.remote(
worker_name(self.rank),
create_ref_fn,
args=())
# kick off forward and backward pass on three other workers (trainers)
rank_diffs = [1, 2, 3]
futures = [
rpc.rpc_async(
worker_name((self.rank + rank_diff) % self.world_size),
trainer_fn,
args=(rref_t1, t2, worker_name(self.rank), rank_diff, sparse),
) for rank_diff in rank_diffs
]
        # check that the trainers are done with their backward pass
for rank_diff in rank_diffs:
self._check_rpc_done(rank_diff)
# trainers are done and holding the context for verification
for rank_diff in rank_diffs:
            # make sure grads are accumulated for the same tensors and that the
            # values are all correct
ctx_id = ctx_ids[rank_diff]
grads = dist_autograd.get_gradients(ctx_id)
local_t1 = rref_t1.to_here()
self.assertIn(local_t1, grads)
self.assertEqual(grads[local_t1], t1.grad)
# unblock trainers
_set_rpc_done(None, 0)
# wait until all trainers are done
torch.futures.wait_all(futures)
def _backward_multiple_round_trips(self, t1, t2, t3, t4, t5, local_grads, sparse):
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
# Multiple RPCs between different nodes.
val = self._exec_func(exec_mode, torch.add, t1, t2)
val = self._exec_func(exec_mode, torch.mul, t3, val)
s1 = self._exec_func(exec_mode, torch.stack, (t4, val))
s2 = self._exec_func(exec_mode, torch.stack, (t5, val))
if sparse:
val = self._exec_func(exec_mode, torch.mul, s1, s2)
val = self._exec_func(exec_mode, torch.mul, val, val)
loss = torch.sparse.sum(val)
else:
val = self._exec_func(exec_mode, torch.bmm, s1, s2)
val = self._exec_func(exec_mode, torch.matmul, val, val)
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t3, t4, t5
)
local_grads = ret if ret else local_grads
def _backward_different_dtypes(self, t1, t2, sparse):
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
loss = self._exec_func(exec_mode, torch.add, t1, t2)
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
    # Run the same code locally and with dist autograd and verify the gradients
    # are the same.
def _backward_simple_python_udf(self, t1, t2, sparse):
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(exec_mode, my_py_add, t1, t2)
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
    # Run the same code locally and with dist autograd and verify the gradients
    # are the same.
def _backward_simple_script_call(self, t1, t2, sparse):
local_grads = None
for exec_mode in [
ExecMode.LOCAL,
ExecMode.RPC_SYNC,
ExecMode.RPC_ASYNC,
ExecMode.REMOTE,
]:
with dist_autograd.context() as context_id:
forward_ret = self._exec_func(exec_mode, my_script_add, t1, t2)
if sparse:
loss = torch.sparse.sum(forward_ret)
else:
loss = forward_ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
def _nested_backward_accumulate_grads(self, t1, t2, sparse):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._test_nested_backward_accumulate_grads,
args=(t1, t2, self._next_rank()),
)
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
# Run backward twice.
dist_autograd.backward(context_id, [loss], retain_graph=True)
dist_autograd.backward(context_id, [loss])
def _backwards_nested_python_udf(self, t1, t2, sparse):
t3 = t1 * t2
t4 = t1 + t2
res = t3 + t4
loss = t1 * t2 * t3 * t4 * res
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
torch.autograd.backward([loss])
# Now run distributed autograd.
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._nested_python_udf,
args=(t1, t2, self._next_rank()),
)
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
dist_autograd.backward(context_id, [loss])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
def _mixed_requires_grad(self, t1, t2, sparse):
for exec_mode in [ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(
exec_mode, DistAutogradTest._mixed_requires_grad_operaton, t1, t2
)
self.assertEqual(t1 * t2, ret)
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
dist_autograd.backward(context_id, [loss])
self.assertTrue(t1.requires_grad)
self.assertFalse(t2.requires_grad)
grads = dist_autograd.get_gradients(context_id)
self.assertIn(t1, grads)
self.assertNotIn(t2, grads)
self.assertEqual(t2, grads[t1])
def _multiple_backward(self, t1, t2, sparse):
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
torch.add,
args=(t1, t2))
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
# Run backward in a loop multiple times.
for _ in range(1000):
dist_autograd.backward(context_id, [loss], retain_graph=True)
    # For the current context, this rank sends t1 and t2 tensors to dst_rank,
    # then gets t3 = torch.add(t1, t2) as the result tensor.
    # For the current context in this rank, it expects a graph like this:
    #  send function:
    #              rpcSendBackward
    #                  /          \
    #  t1.AccumulateGrad         t2.AccumulateGrad
    #
    #  recv function:
    #
    #            |
    #          t3.rpcRecvBackward
    #
def _verify_graph_for_first_rpc_call(
self, send_function, recv_function, t1, t2, ret
):
# Retrieve the next functions in the graph.
next_funcs = send_function.next_functions
self.assertEqual(2, len(next_funcs))
# We should now hit t1 and t2 in the autograd graph.
self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[0][0].name())
self.assertEqual(t1, next_funcs[0][0].variable)
self.assertEqual(0, next_funcs[0][1])
self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[1][0].name())
self.assertEqual(t2, next_funcs[1][0].variable)
self.assertEqual(0, next_funcs[1][1])
# Test recv functions.
self.assertEqual(ret.grad_fn, recv_function)
    # Run the same code locally and with dist autograd and verify the gradients
    # are the same.
def _backward_simple(self, dst, t1, t2, local_grads, sparse):
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func_with_dst(
dst, exec_mode, torch.add, t1, t2
)
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
    # For a context passed from previous nested chain calls, this rank
    # receives two tensors t1 and t2, executes torch.add(t1, t2) and sends
    # the result tensor t3 back.
    # For this context in this rank, it expects a graph like this:
    #  send and recv functions:
    #       rpcSendBackward
    #           |
    #          t3.AddBackward0
    #          /             \
    # t1.recvRpcBackward    t2.recvRpcBackward
def _verify_graph_for_rpc_call_exec(self, send_function):
# Verify next function is AddBackward0
next_funcs = send_function.next_functions
self.assertEqual(1, len(next_funcs))
add_backward_fn = next_funcs[0][0]
self.assertEqual("AddBackward0", add_backward_fn.name())
# Verify the next two functions are the same recv backward function.
next_funcs = add_backward_fn.next_functions
self.assertEqual(2, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
)
self.assertEqual(next_funcs[0][0], next_funcs[1][0])
    # For a context passed from previous nested chain calls, this rank
    # receives two tensors t1 and t2 and forwards them via a nested rpc call
    # to the next dst. On the return route, it receives the result tensor t3
    # from the next dst and forwards t3 back to the previous caller.
    # For this context in this rank, it expects a graph like this:
    #  send and recv functions for receiving and forwarding t1 and t2:
    #       rpcSendBackward
    #          /          \
    # t1.recvRpcBackward    t2.recvRpcBackward
    #  send and recv functions for receiving and forwarding t3:
    #       rpcSendBackward
    #          |
    #          t3.recvRpcBackward
def _verify_graph_for_nested_rpc_call(self, ctx):
send_functions = ctx._send_functions()
self.assertEqual(2, len(send_functions))
        # For the send function created when making the nested rpc call,
        # the next functions of the send function are the two recv functions
        # for the two tensors received from the previous call.
next_funcs = next(iter(send_functions.values())).next_functions
self.assertEqual(2, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
)
self.assertEqual(next_funcs[0][0], next_funcs[1][0])
        # For the send function created when returning the response to the
        # previous call, the next function of the send function is the recv
        # function for the result tensor returned from the nested call.
next_funcs = list(send_functions.values())[1].next_functions
self.assertEqual(1, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
class TensorPipeAgentDistAutogradTest(CommonDistAutogradTest):
# Sparse tests only work with TensorPipeAgent.
@dist_init
def test_graph_for_builtin_call_sparse(self):
self._test_graph(torch.add, ExecMode.RPC_SYNC, True)
@dist_init
def test_graph_for_python_call_sparse(self):
self._test_graph(my_py_add, ExecMode.RPC_SYNC, True)
@dist_init
def test_graph_for_builtin_remote_call_sparse(self):
self._test_graph(torch.add, ExecMode.REMOTE, True)
@dist_init
def test_graph_for_python_remote_call_sparse(self):
self._test_graph(my_py_add, ExecMode.REMOTE, True)
@dist_init
def test_graph_for_py_nested_call_sparse(self):
self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC, True)
@dist_init
def test_graph_for_py_nested_remote_call_sparse(self):
self._test_graph_for_py_nested_call(ExecMode.REMOTE, True)
@dist_init
def test_graph_for_py_nested_call_itself_sparse(self):
self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC, True)
@dist_init
def test_graph_for_py_nested_remote_call_itself_sparse(self):
self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE, True)
@dist_init
def test_no_graph_with_tensors_not_require_grad_sparse(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC, True)
@dist_init
def test_no_graph_with_tensors_not_require_grad_remote_sparse(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE, True)
@dist_init
def test_rpc_complex_args_sparse(self):
self._test_rpc_complex_args(ExecMode.RPC_SYNC, True)
@dist_init
def test_remote_complex_args_sparse(self):
self._test_rpc_complex_args(ExecMode.REMOTE, True)
@dist_init
def test_context_cleanup_tensor_with_grad_sparse(self):
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_context_cleanup_tensor_no_grad_sparse(self):
t1 = build_sparse_tensor(requires_grad=False)
self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add)
@dist_init
def test_context_cleanup_nested_rpc_sparse(self):
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
dst_rank = (self.rank + 1) % self.world_size
args = (t1, t2, dst_rank, self.world_size, 0)
self.context_cleanup_test_helper(
rpc_args=args, func=my_py_nested_call, nested=True
)
@dist_init
def test_backward_no_grad_on_tensor_sparse(self):
self._backward_no_grad_on_tensor(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True
)
@dist_init
def test_backward_simple_sparse(self):
self._backward_simple(
self._next_rank(),
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True
)
@dist_init
def test_backward_simple_self_sparse(self):
self._backward_simple(
self.rank,
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True
)
@dist_init
def test_backward_rref_multi_sparse(self):
if self.rank > 0:
callee = "worker0"
rref_owner = callee
self._backward_rref(
callee,
rref_owner,
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True
)
@dist_init
def test_backward_rref_sparse(self):
callee = worker_name(self._next_rank())
rref_owner = callee
self._backward_rref(
callee,
rref_owner,
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True
)
@dist_init
def test_backward_rref_nested_sparse(self):
callee = worker_name((self.rank + 1) % self.world_size)
rref_owner = worker_name((self.rank + 2) % self.world_size)
self._backward_rref(
callee,
rref_owner,
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True
)
@dist_init
def test_trainer_ps_sparse(self):
self._test_trainer_ps(
build_sparse_tensor,
_run_trainer,
True
)
@dist_init
def test_backward_multiple_round_trips_sparse(self):
self._backward_multiple_round_trips(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=False),
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=False),
build_sparse_tensor(requires_grad=True),
None,
True
)
@dist_init
def test_backward_different_dtypes_sparse(self):
self._backward_different_dtypes(
build_sparse_tensor(requires_grad=True, dtype=torch.float32),
build_sparse_tensor(requires_grad=True, dtype=torch.float64),
True
)
@dist_init
def test_backward_simple_python_udf_sparse(self):
self._backward_simple_python_udf(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True
)
@dist_init
def test_backward_simple_script_call_sparse(self):
self._backward_simple_script_call(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True
)
@dist_init
def test_nested_backward_accumulate_grads_sparse(self):
self._nested_backward_accumulate_grads(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True
)
@dist_init
def test_backwards_nested_python_udf_sparse(self):
# Run equivalent of _nested_python_udf locally.
self._backwards_nested_python_udf(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True
)
@dist_init
def test_mixed_requires_grad_sparse(self):
self._mixed_requires_grad(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=False),
True
)
@dist_init
def test_multiple_backward_sparse(self):
self._multiple_backward(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True
)
@dist_init
def test_embedding_bag_with_no_grad_tensors(self):
dst = self._next_rank()
remote_embedding = rpc.remote(
worker_name(dst),
torch.nn.EmbeddingBag,
args=(16, 16),
kwargs={"mode": "sum", "sparse": True},
)
local_embedding = torch.nn.EmbeddingBag(16, 16, mode="sum", sparse=True)
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
# requires_grad = True to record send/recv functions
per_sample_weights = torch.rand((8), requires_grad=True)
offsets = torch.LongTensor([0, 4])
local_res = local_embedding(input, offsets, per_sample_weights)
# Run backward twice.
torch.autograd.backward([local_res.sum()], retain_graph=True)
torch.autograd.backward([local_res.sum()])
local_grad = local_embedding.weight.grad
with dist_autograd.context() as context_id:
res = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._call_remote_embedding,
args=(remote_embedding, input, offsets, per_sample_weights),
)
# Run backward twice to test accumulation of sparse gradients.
dist_autograd.backward(context_id, [res.sum()], retain_graph=True)
dist_autograd.backward(context_id, [res.sum()])
remote_grad = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._get_grad,
args=(remote_embedding, context_id),
)
self.assertEqual(local_grad, remote_grad)
class DistAutogradTest(CommonDistAutogradTest):
@dist_init
def test_autograd_context(self):
# Verify max possible id.
max_auto_increment = 281474976710655
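        # 281474976710655 == 2**48 - 1: a context id packs the 16-bit worker_id
        # into the high bits and a 48-bit auto-increment counter into the low bits.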
self.assertEqual(
max_auto_increment + (self.worker_id << 48), dist_autograd._get_max_id()
)
context_ids = []
for _ in range(200):
with dist_autograd.context() as context_id:
self.assertEqual(
context_id,
dist_autograd._retrieve_context(context_id)._context_id(),
)
# First 16 bits should be worker_id.
self.assertEqual(self.worker_id, context_id >> 48)
context_ids.append(context_id)
for context_id in context_ids:
with self.assertRaisesRegex(
RuntimeError,
f"Could not find autograd context with id: {context_id}",
):
dist_autograd._retrieve_context(context_id)
@dist_init
def test_nested_context(self):
with dist_autograd.context():
# Nested contexts not supported.
with self.assertRaisesRegex(
RuntimeError, "Already have an autograd context id for this thread"
):
with dist_autograd.context():
pass
@dist_init
def test_graph_for_builtin_call(self):
self._test_graph(torch.add, ExecMode.RPC_SYNC, False)
@dist_init
def test_graph_for_python_call(self):
self._test_graph(my_py_add, ExecMode.RPC_SYNC, False)
@dist_init
def test_graph_for_builtin_remote_call(self):
self._test_graph(torch.add, ExecMode.REMOTE, False)
@dist_init
def test_graph_for_python_remote_call(self):
self._test_graph(my_py_add, ExecMode.REMOTE, False)
@dist_init
def test_graph_for_py_nested_call(self):
self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC, False)
@dist_init
def test_graph_for_py_nested_remote_call(self):
self._test_graph_for_py_nested_call(ExecMode.REMOTE, False)
@dist_init
def test_graph_for_py_nested_call_itself(self):
self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC, False)
@dist_init
def test_graph_for_py_nested_remote_call_itself(self):
self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE, False)
@dist_init
def test_no_graph_with_tensors_not_require_grad(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC, False)
@dist_init
def test_no_graph_with_tensors_not_require_grad_remote(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE, False)
def _test_grad_only_on_return_value(self, exec_mode):
initialize_pg(self.file_init_method, self.rank, self.world_size)
dst_rank = (self.rank + 1) % self.world_size
with dist_autograd.context() as context_id:
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), ret_requires_grad)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), ret_requires_grad
).to_here()
else:
raise ValueError(f"Unrecognized ExecMode {exec_mode}")
dist_autograd.backward(context_id, [ret.sum()])
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
grads = dist_autograd.get_gradients(ctx_ids[1])
self.assertEqual(1, len(grads))
self.assertIn(requires_grad_tensor, grads)
self.assertEqual(torch.ones_like(ret), grads[requires_grad_tensor])
# due to the above get_gradients call, ensure that dist autograd
# contexts aren't cleaned up until all workers exit context managers
dist.barrier()
@dist_init
def test_grad_only_on_return_value(self):
self._test_grad_only_on_return_value(ExecMode.RPC_SYNC)
@dist_init
def test_grad_only_on_return_value_remote(self):
self._test_grad_only_on_return_value(ExecMode.REMOTE)
@dist_init
def test_rpc_complex_args(self):
self._test_rpc_complex_args(ExecMode.RPC_SYNC, False)
@dist_init
def test_remote_complex_args(self):
self._test_rpc_complex_args(ExecMode.REMOTE, False)
@dist_init
def test_context_cleanup_tensor_with_grad(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_context_cleanup_tensor_no_grad(self):
t1 = torch.ones(3, 3, requires_grad=False)
self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add)
@dist_init
def test_context_cleanup_no_tensors(self):
self.context_cleanup_test_helper(rpc_args=(1, 1), func=my_scalar_add)
@dist_init
def test_context_cleanup_nested_rpc(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
dst_rank = (self.rank + 1) % self.world_size
args = (t1, t2, dst_rank, self.world_size, 0)
self.context_cleanup_test_helper(
rpc_args=args, func=my_py_nested_call, nested=True
)
@dist_init
def test_worker_ids_recorded(self):
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
# if no tensors require grad, we should still record worker_ids, as
# the autograd context ID is still passed to other workers.
t1 = torch.ones(3, 3, requires_grad=False)
t2 = torch.zeros(3, 3, requires_grad=False)
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# all worker_ids in dst_ranks should be recorded.
ctx = dist_autograd._current_context()
worker_ids = ctx._known_worker_ids()
self.assertEqual(worker_ids, dst_ranks)
# worker_ids should be recorded when tensors do require grad
t1.requires_grad = True
t2.requires_grad = True
for dst_rank in dst_ranks:
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(t1, t2)
)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# all worker_ids in dst_ranks should be recorded.
worker_ids = ctx._known_worker_ids()
self.assertEqual(worker_ids, dst_ranks)
@dist_init
def test_dist_autograd_profiling(self):
with dist_autograd.context() as context_id:
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(3, 3, requires_grad=True)
loss = rpc.rpc_sync(worker_name(self._next_rank()), torch.add, args=(t1, t2)).sum()
with torch.autograd.profiler.profile() as p:
dist_autograd.backward(context_id, [loss])
function_events = p.function_events
def get_event(partial_key):
return next(event for event in function_events if partial_key in event.name)
send_event = get_event("SendRpcBackward")
recv_event = get_event("RecvRpcBackward")
backward_event = get_event("torch::distributed::autograd::backward")
        # There should be at least 1 send and 1 recv event each, corresponding to the send/recv functions executed.
self.assertEqual(send_event.count, 1)
self.assertEqual(recv_event.count, 1)
        # The CPU total for the backward event should be greater than send and recv, since
        # applying those functions in the backward pass is a subset of the entire backward pass.
self.assertGreater(backward_event.cpu_time_total, send_event.cpu_time_total)
self.assertGreater(backward_event.cpu_time_total, recv_event.cpu_time_total)
@dist_init
def test_error_in_context(self):
with dist_autograd.context():
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(6, 6, requires_grad=True)
with self.assertRaises(RuntimeError):
# This should throw an error since matrix sizes don't match.
rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
)
@dist_init
def test_backward_no_grad_on_tensor(self):
self._backward_no_grad_on_tensor(
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
False
)
@dist_init
def test_backward_simple(self):
self._backward_simple(
self._next_rank(),
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False
)
@dist_init
def test_backward_simple_self(self):
self._backward_simple(
self.rank,
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False
)
@dist_init
def test_backward_rref(self):
callee = worker_name(self._next_rank())
rref_owner = callee
self._backward_rref(
callee,
rref_owner,
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False
)
@dist_init
def test_backward_rref_multi(self):
if self.rank > 0:
callee = "worker0"
rref_owner = callee
self._backward_rref(
callee,
rref_owner,
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False
)
@dist_init
def test_backward_rref_nested(self):
callee = worker_name((self.rank + 1) % self.world_size)
rref_owner = worker_name((self.rank + 2) % self.world_size)
self._backward_rref(
callee,
rref_owner,
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False
)
@dist_init
def test_trainer_ps(self):
self._test_trainer_ps(
create_tensor,
_run_trainer,
False
)
@dist_init
def test_trainer_ps_torchscript_functions(self):
        # TODO: needs more investigation.
        # There is an rref leak when shutting down; we suspect it is because
        # the rref passed as an arg crosses the pybind boundary and is not
        # garbage collected by python when calling shutdown().
import torch.distributed.rpc.api as api
api._ignore_rref_leak = True
self._test_trainer_ps(create_torchscript_tensor, _run_trainer_torchscript, False)
@dist_init
def test_backward_multiple_round_trips(self):
self._backward_multiple_round_trips(
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3)),
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3)),
torch.rand((3, 3), requires_grad=True),
None,
False
)
@dist_init
def test_backward_different_tensor_dims(self):
local_grads = None
t1 = torch.rand((4, 6), requires_grad=True)
t2 = torch.rand((6, 5))
t3 = torch.rand((5, 7), requires_grad=True)
t4 = torch.rand((7, 9))
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
val = self._exec_func(exec_mode, torch.matmul, t1, t2)
val = self._exec_func(exec_mode, torch.linalg.multi_dot, (val, t3, t4))
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t2, t3, t4
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_unused_tensors(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
s = self._exec_func(exec_mode, torch.stack, (t1, t2, t3))
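                # narrow() picks the slices for t1 and t3 out of the stack, so
                # t2 is left unused by the loss below.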
val = self._exec_func(
exec_mode,
torch.matmul,
torch.narrow(s, 0, 0, 1),
torch.narrow(s, 0, 2, 1),
)
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t3
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_multiple_output_tensors(self):
local_grads = None
t = torch.rand((10, 2), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
tensor_list = self._exec_func(exec_mode, torch.split, t, 2)
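                # torch.split(t, 2) yields five (2, 2) chunks of the (10, 2)
                # input; the multi_dot below only consumes chunks 0, 2 and 4.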
t1 = tensor_list[0]
t2 = tensor_list[2]
t3 = tensor_list[4]
val = self._exec_func(exec_mode, torch.linalg.multi_dot, (t1, t2, t3))
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t
)
local_grads = ret if ret else local_grads
def _run_test_backward_unused_send_function_in_thread(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
            # We don't use the result of the RPC function; as a result, the
            # backward pass would hang in the "FAST" mode.
rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
val = torch.mul(t1, t2)
            # Run backward; this would hang forever.
dist_autograd.backward(context_id, [val.sum()])
@dist_init
def test_backward_unused_send_function(self):
# Run the test in a thread which would never finish.
t = threading.Thread(
target=self._run_test_backward_unused_send_function_in_thread
)
t.daemon = True
t.start()
t.join(10) # Wait for 10s.
# Verify thread is still alive (indicating backward hasn't completed yet).
self.assertTrue(t.is_alive())
@dist_init
def test_backward_autograd_engine_error(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
# Perform some ops before error simulation.
tmp = (t1 + t2) * (t1 + t2)
t3 = SimulateBackwardError.apply(tmp)
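            # SimulateBackwardError passes its input through in forward and
            # raises "Simulate error on backward pass" during backward
            # (asserted via the regex below).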
# Run multiple round trips across different nodes and verify the
# original node receives an error thrown on a node deep in the chain.
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t2, t3)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.mul, args=(val, t2)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(val, t2)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.div, args=(val, t2)
)
with self.assertRaisesRegex(
RuntimeError, "Error on Node [0-9]+: Simulate error on backward pass"
):
# Run backwards, and validate we receive an error.
dist_autograd.backward(context_id, [val.sum()])
@dist_init(clean_shutdown=False)
@skip_but_pass_in_sandcastle_if(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_backward_node_failure(self):
rpc._set_rpc_timeout(5) # 5 seconds
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
# Wait for all RPCs to be done.
dist.barrier()
# Kill all odd rank nodes.
if self.rank % 2 == 0:
shutdown_error_regex = self.get_shutdown_error_regex()
# Wait for all other nodes to die.
for rank in range(self.world_size):
if rank % 2 != 0:
wait_until_node_failure(rank, shutdown_error_regex)
# Shutdown sequence is not very well defined and as a result
# we might see any error given by get_shutdown_error_regex()
with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
# Run backwards, and validate we receive an error since all
# other nodes are dead.
dist_autograd.backward(context_id, [res.sum()])
else:
# Exit all other nodes.
pass
@dist_init
def test_backward_without_context(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
context_id = 100 # dummy context_id
with self.assertRaisesRegex(
RuntimeError,
f"Could not find autograd context with id: {context_id}",
):
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
dist_autograd.backward(context_id, [res.sum()])
@dist_init
def test_backward_without_rpc(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.add(t1, t2)
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(torch.ones(3, 3), grads[t1])
self.assertEqual(torch.ones(3, 3), grads[t2])
@dist_init
def test_backward_invalid_args(self):
with dist_autograd.context() as context_id:
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
dist_autograd.backward(context_id, None)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
dist_autograd.backward(None, None)
with self.assertRaisesRegex(
RuntimeError, "No tensors provided for gradient computation"
):
dist_autograd.backward(context_id, [])
with self.assertRaisesRegex(RuntimeError, "requires_grad not set on"):
t = torch.rand(3, 3)
dist_autograd.backward(context_id, [t])
with self.assertRaisesRegex(
RuntimeError, "is not a scalar, all roots need to be scalar"
):
t = torch.rand(3, 3, requires_grad=True)
dist_autograd.backward(context_id, [t])
with self.assertRaisesRegex(
RuntimeError, "does not have a valid gradient function"
):
t = torch.rand(1, requires_grad=True)
dist_autograd.backward(context_id, [t])
@dist_init
def test_backward_multiple_roots(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
r1 = self._exec_func(exec_mode, torch.add, t1, t2).sum()
r2 = self._exec_func(exec_mode, torch.mul, t1, t2).sum()
r3 = self._exec_func(exec_mode, torch.cos, t1).sum()
r4 = self._exec_func(exec_mode, torch.div, t1, t2).sum()
local_grads = self._verify_backwards(
exec_mode, [r1, r2, r3, r4], context_id, local_grads, t1, t2
)
@dist_init
def test_backward_different_dtypes(self):
self._backward_different_dtypes(
torch.rand((3, 3), requires_grad=True, dtype=torch.float32),
torch.rand((3, 3), requires_grad=True, dtype=torch.float64),
False
)
@dist_init
def test_backward_simple_python_udf(self):
self._backward_simple_python_udf(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False
)
@dist_init
def test_backward_simple_script_call(self):
self._backward_simple_script_call(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False
)
@staticmethod
def _complex_python_udf(t1, t2):
t3 = torch.nn.functional.linear(t1, t2)
t4 = torch.nn.functional.linear(t2, t3)
t5 = torch.nn.functional.linear(t3, t4)
return torch.linalg.multi_dot([t1, t2, t3, t4, t5])
@dist_init
def test_backward_complex_python_udf(self):
        # Run the same code locally and with dist autograd and verify the gradients
        # are the same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(
exec_mode, DistAutogradTest._complex_python_udf, t1, t2
)
loss = ret.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
@staticmethod
def _python_udf_with_backward_error(t1, t2):
t3 = t1 + t2
t4 = SimulateBackwardError.apply(t3)
return torch.linalg.multi_dot([t1, t2, t3, t4])
@staticmethod
def _nested_rpc_call_backward_error(t1, t2, dst):
t1 = t1 * t2
t2 = t1 + t2
res = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._python_udf_with_backward_error,
args=(t1, t2),
)
return torch.linalg.multi_dot([t1, t2, res])
@dist_init
def test_backward_python_udf_error(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._nested_rpc_call_backward_error,
args=(t1, t2, self._next_rank()),
)
with self.assertRaisesRegex(
RuntimeError, "Simulate error on backward pass"
):
dist_autograd.backward(context_id, [loss.sum()])
_backward_done = False
@dist_init(clean_shutdown=False)
@skip_but_pass_in_sandcastle_if(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_backward_node_failure_python_udf(self):
# Set a short timeout to quickly time out failed RPCs.
rpc._set_rpc_timeout(5) # 5 seconds
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
dst = self._next_rank()
res = rpc.rpc_sync(
worker_name(dst),
my_py_nested_call,
args=(t1, t2, dst, self.world_size, 1),
)
dist.barrier()
# Kill rank 2 (last hop of nested rpc) and verify rank 0 receives an error.
if self.rank == 2:
return
store = dist.distributed_c10d._get_default_store()
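            # Use the c10d store to signal between rank 0 and the surviving
            # ranks, since RPC may be unusable once rank 2 has exited.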
if self.rank == 0:
# Wait for rank 2 to die.
shutdown_error_regex = self.get_shutdown_error_regex()
wait_until_node_failure(2, shutdown_error_regex)
# Shutdown sequence is not very well defined and as a result
# we might see any error given by get_shutdown_error_regex().
with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
# Run backwards, and validate we receive an error since rank 2 is dead.
dist_autograd.backward(context_id, [res.sum()])
                # Mark rank 0 as done in the store, since the RPC framework on
                # some nodes might be broken at this point.
store.set('test_backward_node_failure_python_udf_rank0_done', "True")
else:
# Wait for backward to finish on rank 0.
store.wait(['test_backward_node_failure_python_udf_rank0_done'], timedelta(seconds=10))
@staticmethod
def _nested_python_udf(t1, t2, dst):
t3 = t1 * t2
t4 = t1 + t2
res = rpc.rpc_sync(worker_name(dst), my_py_add, args=(t3, t4))
return t1 * t2 * t3 * t4 * res
@dist_init
def test_backwards_nested_python_udf(self):
# Run equivalent of _nested_python_udf locally.
self._backwards_nested_python_udf(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False
)
_test_clean_context_backward_context_id = None
class MyBackwardFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
assert DistAutogradTest._test_clean_context_backward_context_id is not None
# Release the context to simulate error (use barrier before releasing
# context to ensure all nodes execute the backward function).
dist.barrier()
dist_autograd._release_context(
DistAutogradTest._test_clean_context_backward_context_id
)
# Verify all contexts are cleaned up.
assert _all_contexts_cleaned_up()
return input
@dist_init
def test_clean_context_during_backward(self):
"""
This test simulates the situation where the 'backward' call might throw
an exception locally which would lead to the autograd context being
cleaned up if we're using the context manager. As a result, the autograd
context might be cleaned up while some threads are still using the
autograd context.
It is fine for the 'backward' call to throw an exception in this test,
but the process should not crash.
"""
initialize_pg(self.file_init_method, self.rank, self.world_size)
context = dist_autograd._new_context()
context_id = context._context_id()
DistAutogradTest._test_clean_context_backward_context_id = context_id
# Send the context id to all nodes.
for i in range(0, self.world_size):
if i != self.rank:
rank_distance = (i - self.rank + self.world_size) % self.world_size
rpc.rpc_sync(
worker_name(i),
_set_rpc_done,
args=(context_id, rank_distance),
)
dist.barrier()
# Verify all context ids have been received.
self.assertEqual(self.world_size - 1, len(known_context_ids))
t1 = torch.rand((3, 3), requires_grad=True)
for i in range(0, 100):
dst = self._next_rank()
t1 = rpc.rpc_sync(worker_name(dst), torch.add, args=(t1, t1))
# Call MyBackwardFunc as the first op of the backward pass to
# ensure we release the context early in the backward pass.
t1 = DistAutogradTest.MyBackwardFunc.apply(t1)
self.assertEqual(100, len(context._send_functions()))
context_id = 100 # dummy context_id
with self.assertRaisesRegex(
RuntimeError,
f"Could not find autograd context with id: {context_id}",
):
dist_autograd.backward(context_id, [t1.sum()])
# HACK: Killing workers since otherwise the autograd engine gets stuck on
# other nodes. The proper fix would be addressing:
# https://github.com/pytorch/pytorch/issues/27643, which would inform
# other nodes about the failure.
# The autograd engine gets stuck on other nodes since they're waiting to
# receive gradients from the node that received an error (and as a
# result it didn't execute the rest of the graph).
dist.barrier()
rpc.shutdown(graceful=False)
sys.exit(0)
@classmethod
def _call_remote_embedding(cls, embedding_rref, input, offsets, per_sample_weights):
embedding = embedding_rref.local_value()
return embedding(input, offsets, per_sample_weights)
@classmethod
def _get_grad(cls, embedding_rref, context_id):
embedding = embedding_rref.local_value()
grad_map = dist_autograd.get_gradients(context_id)
return grad_map[embedding.weight]
@classmethod
def _mixed_requires_grad_operaton(cls, t1, t2):
if t2.requires_grad:
return t1 - t2
else:
return t1 * t2
@dist_init
def test_mixed_requires_grad(self):
self._mixed_requires_grad(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=False),
False
)
class TestDebugInfoFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
debug_info = dist_autograd._get_debug_info()
assert debug_info is not None
backward_passes = int(debug_info["num_current_backward_passes"])
# Hard to validate exact numbers because of the distributed nature.
# We can't use a barrier() here since that would block the single
# CPU thread available for autograd and can cause deadlocks.
assert backward_passes >= 1 and backward_passes <= 4
return input
@dist_init
def test_debug_info(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
i = 0
res = {}
res[i] = t1
for rank in range(self.world_size):
if rank != self.rank:
res[i + 1] = rpc.rpc_sync(
worker_name(rank), torch.add, args=(res[i], t2)
)
i += 1
            # Call the custom function in the middle of the backward pass to
            # ensure all nodes are still waiting on a backward().
res[i + 1] = DistAutogradTest.TestDebugInfoFunc.apply(res[i])
i += 1
for rank in range(self.world_size):
if rank != self.rank:
res[i + 1] = rpc.rpc_sync(
worker_name(rank), torch.add, args=(res[i], t2)
)
i += 1
dist_autograd.backward(context_id, [res[i].sum()])
debug_info = dist_autograd._get_debug_info()
num_autograd_context = int(debug_info["num_autograd_contexts"])
            # Need at least one context and not more than 4.
self.assertTrue(num_autograd_context >= 1 and num_autograd_context <= 4)
for rd in range(self.world_size - 1):
rpc.rpc_sync(
worker_name((self.rank + rd + 1) % self.world_size),
_set_rpc_done,
args=(context_id, rd + 1),
)
dist.barrier()
# Validate information
debug_info = dist_autograd._get_debug_info()
assert debug_info is not None
self.assertEqual(0, int(debug_info["num_current_backward_passes"]))
        # only have `num_current_backward_passes` and `num_autograd_contexts`
self.assertTrue(len(debug_info) == 2)
self.assertTrue(_all_contexts_cleaned_up())
# All contexts should be cleaned up.
debug_info = dist_autograd._get_debug_info()
self.assertEqual(0, int(debug_info["num_autograd_contexts"]))
@staticmethod
def _workload_thread():
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
t3 = rpc.rpc_sync("worker0", torch.add, args=(t1, t2))
t4 = rpc.rpc_sync("worker0", torch.mul, args=(t2, t3))
t5 = rpc.rpc_sync("worker0", torch.matmul, args=(t3, t4))
t6 = rpc.rpc_sync("worker0", torch.add, args=(t4, t5))
dist_autograd.backward(context_id, [t6.sum()])
@dist_init
def test_async_dist_autograd(self):
"""
This test ensures async processing for distributed autograd works
appropriately. This is achieved by spawning multiple threads and
hammering a single node with a lot of backward() calls.
"""
initialize_pg(self.file_init_method, self.rank, self.world_size)
if self.rank != 0:
# All other ranks schedule work on rank 0.
threads = []
for _ in range(20):
t = threading.Thread(target=DistAutogradTest._workload_thread)
t.start()
threads.append(t)
for thread in threads:
thread.join()
dist.barrier()
@dist_init
def test_backward_accumulate_grads(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
t3 = torch.matmul(t1, t2)
# Run backward twice.
torch.autograd.backward([t3.sum()], retain_graph=True)
torch.autograd.backward([t3.sum()])
t3 = rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
)
# Run backward twice.
dist_autograd.backward(context_id, [t3.sum()], retain_graph=True)
dist_autograd.backward(context_id, [t3.sum()])
# Verify the gradients are same for local and remote execution.
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
@staticmethod
def _test_nested_backward_accumulate_grads(t1, t2, dst_rank):
return rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2))
@dist_init
def test_nested_backward_accumulate_grads(self):
self._nested_backward_accumulate_grads(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False
)
@dist_init
def test_multiple_backward(self):
self._multiple_backward(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False
)
@dist_init(clean_shutdown=False)
def test_multiple_backward_with_errors(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
f'worker{self._next_rank()}',
DistAutogradTest._python_udf_with_backward_error,
args=(t1, t2)).sum()
try:
# Run backward in a loop multiple times.
for i in range(100):
if i < 50:
with self.assertRaisesRegex(RuntimeError, "Simulate error on backward pass"):
dist_autograd.backward(context_id, [loss], retain_graph=True)
elif i > 50:
# Recovered from error.
dist_autograd.backward(context_id, [loss], retain_graph=True)
else:
dist.barrier()
SimulateBackwardError._simulate_error = False
dist.barrier()
finally:
# Sync before resetting flag.
dist.barrier()
# Reset the flag.
SimulateBackwardError._simulate_error = True
@dist_init
def test_backward_verify_hooks(self):
t1 = torch.ones((3, 3), requires_grad=True)
# Double the gradient.
t1.register_hook(lambda grad: grad * 2)
t2 = torch.ones((3, 3), requires_grad=True)
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(exec_mode, torch.matmul, t1, t2)
loss = ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@dist_init
def test_no_grad_copy(self):
'''
Similar to test in test_autograd.py.
'''
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad.data_ptr()
return grad, grad
class MyFuncSingleGrad(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFuncSingleGrad.static_grad_ptr = grad.data_ptr()
return grad
class NonContGradFunc(Function):
@staticmethod
def forward(ctx, inp1):
ctx.size = inp1.size()
return torch.tensor([1.])
@staticmethod
def backward(ctx, grad):
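                # expand() returns a non-contiguous (stride-0) view, so the
                # grad flowing into MyFunc must be copied rather than stolen
                # during accumulation (checked below).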
return torch.ones(1).expand(ctx.size)
a = torch.randn(5, 6, requires_grad=True)
b = torch.randn(5, 6, requires_grad=True)
# non-contiguous grad should be copied
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [NonContGradFunc.apply(MyFunc.apply(a, b))])
grads = dist_autograd.get_gradients(context_id)
self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
# test case that should trigger no copy for a
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [MyFuncSingleGrad.apply(a)[1][0]])
grads = dist_autograd.get_gradients(context_id)
p_g = MyFuncSingleGrad.static_grad_ptr
p_a = grads[a].data_ptr()
# Verify there was no clone.
self.assertTrue(p_a == p_g)
        # Test case that should trigger a copy for both a and b. This is
        # different in the distributed autograd case since we hold
        # a reference to all grads in a vector until all accumulation is done.
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [MyFunc.apply(a, b)[1][0]])
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a].data_ptr()
p_b = grads[b].data_ptr()
            # check that a and b use different grad buffers
self.assertFalse(p_a == p_b)
# both should be copied.
self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
@dist_init
def test_no_grad_copy_sparse(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
return grad
class NonContGradFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
# Create a sparse tensor with non-contiguous indices and values
# and return as grad.
v = torch.rand(1, 3)
i = torch.ones(1, 1, dtype=torch.long)
nv = v.expand(8, 3)
ni = i.expand(1, 8)
ngrad = torch.sparse_coo_tensor(ni, nv, (10, 3), dtype=torch.float32)
NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
return ngrad, ngrad
a = torch.randn(10, 3, requires_grad=True)
b = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
# test case that should trigger no copy for a.
with dist_autograd.context() as context_id:
emb_matrix = MyFunc.apply(a)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
# check a uses the same buffer
self.assertTrue(p_a == p_g)
# Run backwards multiple times.
for _ in range(10):
dist_autograd.backward(context_id, [loss], retain_graph=True)
        # With non-contiguous indices and values, we should trigger a copy.
with dist_autograd.context() as context_id:
emb_matrix = NonContGradFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = NonContGradFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
p_b = grads[b]._values().data_ptr()
            # check that a and b use different grad buffers
self.assertFalse(p_a == p_b)
# Verify we cloned both grads.
self.assertFalse(p_a == p_g)
self.assertFalse(p_b == p_g)
# Run backwards multiple times to verify accumulation.
for _ in range(10):
dist_autograd.backward(context_id, [loss], retain_graph=True)
@dist_init
def test_grad_copy_sparse_indices_extra_ref(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
static_grad_indices_ref = None
static_grad_values_ref = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
# indices() and values() return views, so holding onto
# references of them would not increment refcount of indices
# and values inside the sparse tensor.
MyFunc.static_grad_indices_ref = grad._indices()
MyFunc.static_grad_values_ref = grad._values()
return grad
a = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
with dist_autograd.context() as context_id:
emb_matrix = MyFunc.apply(a)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
self.assertIsNotNone(MyFunc.static_grad_indices_ref)
self.assertIsNotNone(MyFunc.static_grad_values_ref)
# grad would be stolen, since static_grad_indices_ref and
# static_grad_values_ref are holding onto views and don't bump the
# refcount.
self.assertTrue(p_g == p_a)
@dist_init
def test_post_hooks(self):
self.hook_called_times = 0
def post_hook_add_one(output_grads, input_grads):
self.hook_called_times += 1
return output_grads
def post_hook_add_two(output_grads, input_grads):
self.hook_called_times += 2
return output_grads
t = torch.rand(10, 10, requires_grad=True)
a = t + t
# Register post hooks
accumulate_grad_0 = a.grad_fn.next_functions[0][0]
accumulate_grad_0.register_hook(post_hook_add_one)
accumulate_grad_0.register_hook(post_hook_add_two)
accumulate_grad_1 = a.grad_fn.next_functions[1][0]
accumulate_grad_1.register_hook(post_hook_add_two)
with dist_autograd.context() as context_id:
loss = a.sum()
dist_autograd.backward(context_id, [loss])
self.assertEqual(5, self.hook_called_times)
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(1, len(grads))
self.assertTrue(t in grads)
@staticmethod
def _slow_add(t1, t2):
time.sleep(1)
t3 = t1 + t2
t3.requires_grad = True
return t3
@dist_init
def test_thread_local_context_id(self):
t1 = torch.rand((3, 3))
t2 = torch.rand((3, 3))
t3 = t1 + t2
t3.requires_grad = True
t3.sum().backward()
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, DistAutogradTest._slow_add, args=(t1, t2))
with dist_autograd.context() as context_id:
loss = rref.to_here().sum()
# due to slow add, the continuation of this backward pass will be
# invoked by the previous rpc.remote thread which does not have a
# valid context_id. So, this can test whether we propagate
# thread_local states properly when jumping across threads on the
# server side.
dist_autograd.backward(context_id, [loss])
self.assertTrue(
rpc.rpc_sync(
dst,
_compare_owner_value,
args=(context_id, rref, t3.grad)
)
)
class CudaDistAutogradTest(CommonDistAutogradTest):
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_simple(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
(t1 + t2).sum().backward()
with dist_autograd.context() as context_id:
t3 = t1 + t2
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_to_cpu_continuation(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True)
# Run a few iterations.
for _ in range(3):
t1.grad = None
t2.grad = None
# Root is CPU
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
t3 = self._exec_func(exec_mode, torch.add, t2, t2)
t4 = t3.cuda(0) + t1
t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2)
t6 = t5.cuda(0) + t4
t7 = self._exec_func(exec_mode, torch.add, t6.cpu(), t5)
# Autograd graph consists of CPU -> GPU -> CPU execution.
ret = self._verify_backwards(
exec_mode, [t7.sum()], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_to_cpu_continuation_gpu_root(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True)
# Run a few iterations.
for _ in range(3):
t1.grad = None
t2.grad = None
            # Root is on GPU
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
t3 = self._exec_func(exec_mode, torch.add, t2, t2)
t4 = t3.cuda(0) + t1
t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2)
t6 = t5.cuda(0) + t4
# Autograd graph consists of CPU -> GPU -> CPU execution.
ret = self._verify_backwards(
exec_mode, [t6.sum()], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
class FaultyAgentDistAutogradTest(RpcAgentTestFixture):
# Reusing a simplified helper function from DistAutogradTest to ensure
# autograd context is successfully cleaned up even when RPCs are failing.
def context_cleanup_test_helper(self, rpc_args, func):
initialize_pg(self.file_init_method, self.rank, self.world_size)
        # Test that in dist autograd, when tensors communicated over RPC do
        # NOT require grad, we still clean up the dist autograd contexts created
        # on other nodes. This is because the autograd context is still
        # communicated over RPC even if the tensor arguments do not require
        # grad, since the response might.
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# the thread's context id should be cleaned up
with self.assertRaises(RuntimeError):
dist_autograd._retrieve_context(context_id)
# Ensure all peers have finished mutating the
# `known_context_ids` set.
dist.barrier()
# check that all contexts have been cleaned up.
success = _all_contexts_cleaned_up()
self.assertTrue(success)
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
@dist_init
def test_context_cleanup_tensor_with_grad(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_verify_backend_options(self):
self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
class WrapperModule(nn.Module):
def __init__(self, model, device):
super().__init__()
self.model = model.to(device)
def forward(self, *args):
return self.model(*args)
def gradients(self, ctx_id):
grads = dist_autograd.get_gradients(ctx_id)
return [grads[p] for p in self.model.parameters()]
class TensorPipeCudaDistAutogradTest(RpcAgentTestFixture):
@skip_if_lt_x_gpu(4)
def test_device_maps_backward_pass(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
# The reverse of this device mapping should be used for the backward pass.
options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
t1 = torch.rand(10, device=self.rank, requires_grad=True)
t2 = torch.rand(10, device=self.rank, requires_grad=True)
with dist_autograd.context() as context_id:
res = rpc.rpc_sync(dst, torch.add, args=(t1, t2))
dist_autograd.backward(context_id, [res.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(torch.ones(10), grads[t1])
self.assertEqual(torch.ones(10), grads[t2])
self.assertEqual(t1.device, grads[t1].device)
self.assertEqual(t2.device, grads[t2].device)
rpc.shutdown()
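    # Hedged sketch (illustration only; `_example_reverse_device_map` is a
    # hypothetical helper, not used by the tests): a forward device map such as
    # {0: 1} sends tensors from the caller's cuda:0 to the callee's cuda:1, and
    # the backward pass conceptually applies the inverted mapping, which is why
    # grads[t1] above comes back on t1's original device.
    @staticmethod
    def _example_reverse_device_map(device_map):
        return {callee: caller for caller, callee in device_map.items()}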
class MyRemoteCompute(torch.nn.Module):
def forward(self, input):
input = input * 2.0
return input
class MyLocalCompute(torch.nn.Module):
def __init__(self, next_stage):
super().__init__()
self.next_stage = next_stage
def forward(self, input):
return self.next_stage.rpc_sync().forward(input)
@skip_if_lt_x_gpu(4)
def test_dist_autograd_sync_streams(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
# The reverse of this device mapping should be used for the backward pass.
options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
remote_compute = rpc.remote(dst, TensorPipeCudaDistAutogradTest.MyRemoteCompute)
local_compute = TensorPipeCudaDistAutogradTest.MyLocalCompute(remote_compute)
for _ in range(10):
input = torch.rand([1000, 10000], device=self.rank, requires_grad=True)
# Run local autograd
result = input * 2.0
r = random.random()
loss = result.sum() * r
loss.backward()
# Run distributed autograd
with dist_autograd.context() as context_id:
result = local_compute(input)
loss = result.sum() * r
dist_autograd.backward(context_id, [loss])
# Compare grads.
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(input.grad, grads[input])
rpc.shutdown()
@skip_if_lt_x_gpu(4)
def test_gradients_synchronizations(self):
options = self.rpc_backend_options
for peer_rank in range(self.world_size):
options.set_device_map(worker_name(peer_rank), {self.rank: peer_rank})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 0:
# this is master
layers = [nn.Linear(2000, 2000) for _ in range(self.world_size - 1)]
local_layers = [l.to(0) for l in layers]
remote_layers = [
rpc.remote(
worker_name(rank),
WrapperModule,
args=(layers[rank - 1], rank)
) for rank in range(1, self.world_size)
]
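            # remote_layers[i] wraps a copy of layers[i] on worker i + 1, where
            # WrapperModule moves it to cuda:(i + 1) (the device argument is
            # the rank), mirroring local_layers which live on cuda:0 here.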
x = torch.randn(5000, 2000).to(0)
# local iteration
local_model = nn.Sequential(*local_layers)
local_model(x).sum().backward()
# remote iteration
with dist_autograd.context() as context_id:
for remote_layer in remote_layers:
x = remote_layer.rpc_sync().forward(x)
dist_autograd.backward(context_id, [x.sum()])
futs = []
for remote_layer in remote_layers:
futs.append(remote_layer.rpc_async().gradients(context_id))
for i in range(len(futs)):
local_gradients = [p.grad for p in local_layers[i].parameters()]
for g1, g2 in zip(futs[i].wait(), local_gradients):
self.assertEqual(g1, g2)
rpc.shutdown()
```
|
======================================================================================================================================================
SOURCE CODE FILE: dist_optimizer_test.py
LINES: 1
SIZE: 10.58 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\rpc\dist_optimizer_test.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import threading
import torch
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
from torch import optim
from torch.distributed.optim import DistributedOptimizer
from torch.testing._internal.dist_utils import dist_init
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
class MyModule:
lock = threading.Lock()
def __init__(self, requires_grad=True):
# cannot directly use torch.manual_seed(0) as all threads share the same
# default generator. The race from multiple RPC threads could mess up
# the draw order from the default RNG instance, leading to
# non-deterministic behavior. Hence, create a dedicated RNG here.
g_cpu = torch.Generator()
g_cpu.manual_seed(0)
self.w = torch.rand((3, 3), requires_grad=requires_grad, generator=g_cpu)
def forward(self, t1):
return torch.mm(self.w, t1)
def get_w(self):
return self.w
class FailingOptimizer(optim.Optimizer):
def __init__(self, params):
super().__init__(params, {})
def step(self, closure=None):
raise ValueError("Error running optimizer.")
class OptimizerFailingOnConstructor(optim.Optimizer):
def __init__(self, params):
super().__init__(params, {})
raise ValueError("Error creating optimizer.")
def step(self, closure=None):
raise NotImplementedError
def _call_method(method, obj_rref, *args, **kwargs):
return method(obj_rref.local_value(), *args, **kwargs)
def remote_method(method, obj_rref, *args, **kwargs):
"""
Call rpc.remote on a method in a remote object.
Args:
method: the method (for example, Class.method)
obj_rref (RRef): remote reference to the object
args: positional arguments to pass to the method
kwargs: keyword arguments to pass to the method
    Returns an RRef to the remote method call result.
"""
return rpc.remote(
obj_rref.owner(),
_call_method,
args=[method, obj_rref] + list(args),
kwargs=kwargs,
)
def rpc_async_method(method, obj_rref, *args, **kwargs):
"""
Call rpc.rpc_async on a method in a remote object.
Args:
method: the method (for example, Class.method)
obj_rref (RRef): remote reference to the object
args: positional arguments to pass to the method
kwargs: keyword arguments to pass to the method
Returns a Future to the method call result.
"""
return rpc.rpc_async(
obj_rref.owner(),
_call_method,
args=[method, obj_rref] + list(args),
kwargs=kwargs,
)
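# Hedged usage sketch (not part of the original file; `_example_remote_forward`
# is a hypothetical name): assuming RPC has been initialized and `module_rref`
# is an RRef to a MyModule owned by another worker, the two helpers above can
# be combined as follows.
def _example_remote_forward(module_rref, t):
    # remote_method returns an RRef to the result computed on the owner.
    out_rref = remote_method(MyModule.forward, module_rref, t)
    # rpc_async_method returns a Future; wait() blocks for the tensor result.
    out_fut = rpc_async_method(MyModule.forward, module_rref, t)
    return out_rref.to_here(), out_fut.wait()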
class DistOptimizerTest(RpcAgentTestFixture):
@dist_init()
def test_dist_optim_exception(self):
# distributed version
owner1 = f"worker{(self.rank + 1) % self.world_size:d}"
owner2 = f"worker{(self.rank + 2) % self.world_size:d}"
remote_module1 = rpc.remote(owner1, MyModule)
remote_module2 = rpc.remote(owner2, MyModule)
remote_param1 = remote_method(MyModule.get_w, remote_module1)
remote_param2 = remote_method(MyModule.get_w, remote_module2)
dist_optim = DistributedOptimizer(
FailingOptimizer, [remote_param1, remote_param2]
)
with dist_autograd.context() as context_id:
g_cpu = torch.Generator()
g_cpu.manual_seed(0)
t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
output1 = rpc_async_method(MyModule.forward, remote_module1, t2)
output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait())
loss = torch.add(output2.wait(), t1).sum()
dist_autograd.backward(context_id, [loss])
with self.assertRaisesRegex(Exception, "Error running optimizer"):
dist_optim.step(context_id)
@dist_init()
def test_dist_optim_exception_on_constructor(self):
# distributed version
owner1 = f"worker{(self.rank + 1) % self.world_size:d}"
owner2 = f"worker{(self.rank + 2) % self.world_size:d}"
remote_module1 = rpc.remote(owner1, MyModule)
remote_module2 = rpc.remote(owner2, MyModule)
remote_param1 = remote_method(MyModule.get_w, remote_module1)
remote_param2 = remote_method(MyModule.get_w, remote_module2)
with self.assertRaisesRegex(Exception, "Error creating optimizer."):
DistributedOptimizer(
OptimizerFailingOnConstructor, [remote_param1, remote_param2]
)
def _test_dist_optim_base(self, optim_cls, *args, **kwargs):
# local version
module1 = MyModule()
module2 = MyModule()
params = [module1.get_w(), module2.get_w()]
local_optim = optim_cls(params, *args, **kwargs)
old_w1 = module1.w.detach().clone()
old_w2 = module2.w.detach().clone()
g_cpu = torch.Generator()
g_cpu.manual_seed(0)
t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
output1 = module1.forward(t2)
output2 = module2.forward(output1)
loss = torch.add(output2, t1).sum()
loss.backward()
local_optim.step()
# distributed version
owner1 = f"worker{(self.rank + 1) % self.world_size:d}"
owner2 = f"worker{(self.rank + 2) % self.world_size:d}"
remote_module1 = rpc.remote(owner1, MyModule)
remote_module2 = rpc.remote(owner2, MyModule)
remote_param1 = remote_method(MyModule.get_w, remote_module1)
remote_param2 = remote_method(MyModule.get_w, remote_module2)
# sanity check: local and remote initial weights should match
self.assertEqual(old_w1, remote_param1.to_here())
self.assertEqual(old_w2, remote_param2.to_here())
dist_optim = DistributedOptimizer(
optim_cls, [remote_param1, remote_param2], *args, **kwargs
)
with dist_autograd.context() as context_id:
g_cpu.manual_seed(0)
t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
output1 = rpc_async_method(MyModule.forward, remote_module1, t2)
output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait())
loss = torch.add(output2.wait(), t1)
dist_autograd.backward(context_id, [loss.sum()])
dist_optim.step(context_id)
new_w1 = rpc_async_method(MyModule.get_w, remote_module1).wait()
new_w2 = rpc_async_method(MyModule.get_w, remote_module2).wait()
# ensure optimizer changed weights
self.assertNotEqual(old_w1, new_w1)
self.assertNotEqual(old_w2, new_w2)
# ensure local equals remote
self.assertEqual(new_w1, module1.get_w())
self.assertEqual(new_w2, module2.get_w())
@dist_init()
def test_dist_optim(self):
self._test_dist_optim_base(optim.Adagrad, lr=0.05)
self._test_dist_optim_base(optim.Adam, lr=1e-2, amsgrad=True)
self._test_dist_optim_base(optim.AdamW, lr=0.05, amsgrad=True)
self._test_dist_optim_base(optim.SGD, lr=0.05)
self._test_dist_optim_base(optim.SGD, lr=1e-3, momentum=1, weight_decay=1, nesterov=True)
self._test_dist_optim_base(optim.Adadelta, rho=0.95)
self._test_dist_optim_base(optim.RMSprop, lr=0.05)
self._test_dist_optim_base(optim.Adamax, lr=0.05)
self._test_dist_optim_base(optim.Rprop, lr=0.05)
def _test_dist_optim_none_grads(self, optim_cls, *args, **kwargs):
# local version
module1 = MyModule()
module2 = MyModule(requires_grad=False)
params = [module1.get_w(), module2.get_w()]
local_optim = optim_cls(params, *args, **kwargs)
old_w1 = module1.w.detach().clone()
old_w2 = module2.w.detach().clone()
g_cpu = torch.Generator()
g_cpu.manual_seed(0)
t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
output1 = module1.forward(t2)
output2 = module2.forward(output1)
loss = torch.add(output2, t1).sum()
loss.backward()
local_optim.step()
# distributed version
owner1 = f"worker{(self.rank + 1) % self.world_size:d}"
owner2 = f"worker{(self.rank + 2) % self.world_size:d}"
remote_module1 = rpc.remote(owner1, MyModule)
remote_module2 = rpc.remote(owner2, MyModule, args=(False,))
remote_param1 = remote_module1.remote().get_w()
remote_param2 = remote_module2.remote().get_w()
# sanity check: local and remote initial weights should match
self.assertEqual(old_w1, remote_param1.to_here())
self.assertEqual(old_w2, remote_param2.to_here())
dist_optim = DistributedOptimizer(
optim_cls, [remote_param1, remote_param2], *args, **kwargs
)
with dist_autograd.context() as context_id:
g_cpu.manual_seed(0)
t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
output1 = remote_module1.rpc_async().forward(t2)
output2 = remote_module2.rpc_async().forward(output1.wait())
loss = torch.add(output2.wait(), t1)
dist_autograd.backward(context_id, [loss.sum()])
dist_optim.step(context_id)
new_w1 = remote_module1.rpc_async().get_w().wait()
new_w2 = remote_module2.rpc_async().get_w().wait()
# ensure optimizer changed weights for w1
self.assertNotEqual(old_w1, new_w1)
# ensure optimizer not changed weights for w2
self.assertEqual(old_w2, new_w2)
# ensure local equals remote
self.assertEqual(new_w1, module1.get_w())
self.assertEqual(new_w2, module2.get_w())
@dist_init()
def test_dist_optim_none_grads(self):
self._test_dist_optim_none_grads(optim.SGD, lr=0.05)
self._test_dist_optim_none_grads(optim.RMSprop, lr=0.05)
self._test_dist_optim_none_grads(optim.Rprop, lr=0.05)
self._test_dist_optim_none_grads(optim.Adadelta, rho=0.95)
```
|
====================================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\rpc\examples\__init__.py
ENCODING: utf-8
```py
```
|
=================================================================================================================================================================
SOURCE CODE FILE: parameter_server_test.py
LINES: 1
SIZE: 4.55 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\rpc\examples\parameter_server_test.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# If you need to modify this file to make this test pass, please also apply same edits accordingly to
# https://github.com/pytorch/examples/blob/master/distributed/rpc/batch/parameter_server.py
# and https://pytorch.org/tutorials/intermediate/rpc_async_execution.html#batch-updating-parameter-server
import threading
from datetime import datetime
from time import perf_counter
import torch
import torch.distributed.rpc as rpc
import torch.nn as nn
from torch import optim
from torch.testing._internal.dist_utils import (
dist_init,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import RpcAgentTestFixture
batch_size = 20
in_features = 100
out_features = 30
num_batches = 4
def timed_log(text):
print(f"{datetime.now().strftime('%H:%M:%S')} {text}")
class BatchUpdateParameterServer:
def __init__(self, batch_update_size):
self.model = nn.Linear(in_features, out_features)
self.lock = threading.Lock()
self.future_model = torch.futures.Future()
self.batch_update_size = batch_update_size
self.curr_update_size = 0
self.optimizer = optim.SGD(self.model.parameters(), lr=0.001, momentum=0.9)
for p in self.model.parameters():
p.grad = torch.zeros_like(p)
def get_model(self):
return self.model
@staticmethod
@rpc.functions.async_execution
def update_and_fetch_model(ps_rref, grads):
self = ps_rref.local_value()
for p, g in zip(self.model.parameters(), grads):
if p.grad is None:
p.grad = g
else:
p.grad += g
with self.lock:
timed_log(f"PS got {self.curr_update_size}/{self.batch_update_size} updates")
self.curr_update_size += 1
fut = self.future_model
if self.curr_update_size >= self.batch_update_size:
for p in self.model.parameters():
p.grad /= self.batch_update_size
self.curr_update_size = 0
self.optimizer.step()
self.optimizer.zero_grad()
fut.set_result(self.model)
timed_log("PS updated model")
self.future_model = torch.futures.Future()
return fut
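# Hedged sketch (illustration only; `_example_async_echo` is a hypothetical
# function, not used by the test): @rpc.functions.async_execution lets an RPC
# target return a torch.futures.Future, and the reply is sent only once that
# future completes. update_and_fetch_model above relies on this to hold every
# trainer's reply until a full batch of gradients has been applied.
@rpc.functions.async_execution
def _example_async_echo(value):
    fut = torch.futures.Future()
    # A real server would defer this until some condition is met; here the
    # future is completed immediately just to show the shape of the API.
    fut.set_result(value)
    return fut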
class Trainer:
def __init__(self, ps_rref):
self.ps_rref = ps_rref
self.loss_fn = nn.L1Loss()
def get_next_batch(self):
for _ in range(num_batches):
inputs = torch.randn(batch_size, in_features)
labels = torch.zeros(batch_size, out_features)
yield inputs, labels
def train(self):
name = rpc.get_worker_info().name
m = self.ps_rref.rpc_sync().get_model()
for inputs, labels in self.get_next_batch():
timed_log(f"{name} processing one batch")
self.loss_fn(m(inputs), labels).backward()
timed_log(f"{name} reporting grads")
m = rpc.rpc_sync(
self.ps_rref.owner(),
BatchUpdateParameterServer.update_and_fetch_model,
args=(self.ps_rref, [p.grad for p in m.cpu().parameters()]),
)
timed_log(f"{name} got updated model")
def run_trainer(ps_rref):
trainer = Trainer(ps_rref)
trainer.train()
def run_ps(trainers):
timed_log("Start training")
start = perf_counter()
ps_rref = rpc.RRef(BatchUpdateParameterServer(len(trainers)))
futs = [rpc.rpc_async(trainer, run_trainer, args=(ps_rref,)) for trainer in trainers]
torch.futures.wait_all(futs)
stop = perf_counter()
timed_log("Finish training")
timed_log(f"Time spent training: {stop - start}s")
class ParameterServerTest(RpcAgentTestFixture):
@dist_init(setup_rpc=False)
def test_batch_updating_parameter_server(self):
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        if self.rank == 0:
            # Rank 0 hosts the parameter server and drives training; the
            # remaining ranks act as trainers.
            run_ps([worker_name(r) for r in range(1, self.world_size)])
rpc.shutdown()
```
|
===========================================================================================================================================================================
SOURCE CODE FILE: reinforcement_learning_rpc_test.py
LINES: 1
SIZE: 9.26 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\rpc\examples\reinforcement_learning_rpc_test.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# If you need to modify this file to make this test pass, please also apply same edits accordingly to
# https://github.com/pytorch/examples/blob/master/distributed/rpc/rl/main.py
# and https://pytorch.org/tutorials/intermediate/rpc_tutorial.html
import numpy as np
import torch
import torch.distributed.rpc as rpc
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributed.rpc import RRef, rpc_sync, rpc_async, remote
from torch.distributions import Categorical
from torch.testing._internal.dist_utils import dist_init, worker_name
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import RpcAgentTestFixture
TOTAL_EPISODE_STEP = 5000
GAMMA = 0.1
SEED = 543
def _call_method(method, rref, *args, **kwargs):
r"""
a helper function to call a method on the given RRef
"""
return method(rref.local_value(), *args, **kwargs)
def _remote_method(method, rref, *args, **kwargs):
r"""
    a helper function to run a method on the owner of the given RRef and
    fetch back the result using RPC
"""
args = [method, rref] + list(args)
return rpc_sync(rref.owner(), _call_method, args=args, kwargs=kwargs)
class Policy(nn.Module):
r"""
Borrowing the ``Policy`` class from the Reinforcement Learning example.
Copying the code to make these two examples independent.
See https://github.com/pytorch/examples/tree/master/reinforcement_learning
"""
def __init__(self) -> None:
super().__init__()
self.affine1 = nn.Linear(4, 128)
self.dropout = nn.Dropout(p=0.6)
self.affine2 = nn.Linear(128, 2)
self.saved_log_probs = []
self.rewards = []
def forward(self, x):
x = self.affine1(x)
x = self.dropout(x)
x = F.relu(x)
action_scores = self.affine2(x)
return F.softmax(action_scores, dim=1)
class DummyEnv:
r"""
A dummy environment that implements the required subset of the OpenAI gym
interface. It exists only to avoid a dependency on gym for running the
tests in this file. It is designed to run for a set max number of iterations,
returning random states and rewards at each step.
"""
def __init__(self, state_dim=4, num_iters=10, reward_threshold=475.0):
self.state_dim = state_dim
self.num_iters = num_iters
self.iter = 0
self.reward_threshold = reward_threshold
def seed(self, manual_seed):
torch.manual_seed(manual_seed)
def reset(self):
self.iter = 0
return torch.randn(self.state_dim)
def step(self, action):
self.iter += 1
state = torch.randn(self.state_dim)
reward = torch.rand(1).item() * self.reward_threshold
done = self.iter >= self.num_iters
info = {}
return state, reward, done, info
class Observer:
r"""
An observer has exclusive access to its own environment. Each observer
    captures the state from its environment, and sends the state to the agent to
select an action. Then, the observer applies the action to its environment
and reports the reward to the agent.
"""
def __init__(self) -> None:
self.id = rpc.get_worker_info().id
self.env = DummyEnv()
self.env.seed(SEED)
def run_episode(self, agent_rref, n_steps):
r"""
Run one episode of n_steps.
Arguments:
agent_rref (RRef): an RRef referencing the agent object.
n_steps (int): number of steps in this episode
"""
state, _ep_reward = self.env.reset(), 0
for _ in range(n_steps):
# send the state to the agent to get an action
action = _remote_method(Agent.select_action, agent_rref, self.id, state)
# apply the action to the environment, and get the reward
state, reward, done, _ = self.env.step(action)
# report the reward to the agent for training purpose
_remote_method(Agent.report_reward, agent_rref, self.id, reward)
if done:
break
class Agent:
def __init__(self, world_size):
self.ob_rrefs = []
self.agent_rref = RRef(self)
self.rewards = {}
self.saved_log_probs = {}
self.policy = Policy()
self.optimizer = optim.Adam(self.policy.parameters(), lr=1e-2)
self.eps = np.finfo(np.float32).eps.item()
self.running_reward = 0
self.reward_threshold = DummyEnv().reward_threshold
for ob_rank in range(1, world_size):
ob_info = rpc.get_worker_info(worker_name(ob_rank))
self.ob_rrefs.append(remote(ob_info, Observer))
self.rewards[ob_info.id] = []
self.saved_log_probs[ob_info.id] = []
def select_action(self, ob_id, state):
r"""
This function is mostly borrowed from the Reinforcement Learning example.
See https://github.com/pytorch/examples/tree/master/reinforcement_learning
The main difference is that instead of keeping all probs in one list,
the agent keeps probs in a dictionary, one key per observer.
NB: no need to enforce thread-safety here as GIL will serialize
executions.
"""
probs = self.policy(state.unsqueeze(0))
m = Categorical(probs)
action = m.sample()
self.saved_log_probs[ob_id].append(m.log_prob(action))
return action.item()
def report_reward(self, ob_id, reward):
r"""
Observers call this function to report rewards.
"""
self.rewards[ob_id].append(reward)
def run_episode(self, n_steps=0):
r"""
Run one episode. The agent will tell each observer to run n_steps.
"""
# make async RPC to kick off an episode on all observers
futs = [
rpc_async(
ob_rref.owner(),
_call_method,
args=(Observer.run_episode, ob_rref, self.agent_rref, n_steps)
) for ob_rref in self.ob_rrefs
]
# wait until all observers have finished this episode
for fut in futs:
fut.wait()
def finish_episode(self):
r"""
This function is mostly borrowed from the Reinforcement Learning example.
See https://github.com/pytorch/examples/tree/master/reinforcement_learning
The main difference is that it joins all probs and rewards from
        different observers into one list, and uses the minimum observer
        reward as the reward of the current episode.
"""
# joins probs and rewards from different observers into lists
R, probs, rewards = 0, [], []
for ob_id in self.rewards:
probs.extend(self.saved_log_probs[ob_id])
rewards.extend(self.rewards[ob_id])
# use the minimum observer reward to calculate the running reward
min_reward = min(sum(self.rewards[ob_id]) for ob_id in self.rewards)
self.running_reward = 0.05 * min_reward + (1 - 0.05) * self.running_reward
# clear saved probs and rewards
for ob_id in self.rewards:
self.rewards[ob_id] = []
self.saved_log_probs[ob_id] = []
policy_loss, returns = [], []
for r in rewards[::-1]:
R = r + GAMMA * R
returns.insert(0, R)
returns = torch.tensor(returns)
returns = (returns - returns.mean()) / (returns.std() + self.eps)
for log_prob, R in zip(probs, returns):
policy_loss.append(-log_prob * R)
self.optimizer.zero_grad()
policy_loss = torch.cat(policy_loss).sum()
policy_loss.backward()
self.optimizer.step()
return min_reward
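# Hedged worked example of the discounted-return loop above (GAMMA = 0.1):
# rewards [1.0, 2.0, 3.0] accumulate in reverse as 3.0, then
# 2.0 + 0.1 * 3.0 = 2.3, then 1.0 + 0.1 * 2.3 = 1.23, giving returns
# [1.23, 2.3, 3.0] before normalization. `_example_discounted_returns` is a
# hypothetical helper for illustration only.
def _example_discounted_returns(rewards, gamma=GAMMA):
    R, returns = 0.0, []
    for r in rewards[::-1]:
        R = r + gamma * R
        returns.insert(0, R)
    return returns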
def run_agent(agent, n_steps):
while True:
agent.run_episode(n_steps=n_steps)
agent.finish_episode()
if agent.running_reward > agent.reward_threshold:
print(f"Solved! Running reward is now {agent.running_reward}!")
break
class ReinforcementLearningRpcTest(RpcAgentTestFixture):
@dist_init(setup_rpc=False)
def test_rl_rpc(self):
if self.rank == 0:
# Rank 0 is the agent.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
agent = Agent(self.world_size)
run_agent(agent, n_steps=int(TOTAL_EPISODE_STEP / (self.world_size - 1)))
# Ensure training was run. We don't really care about whether the task was learned,
# since the purpose of the test is to check the API calls.
self.assertGreater(agent.running_reward, 0.0)
else:
# Other ranks are observers that passively wait for instructions from the agent.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
```
|
========================================================================================================================================================
SOURCE CODE FILE: faulty_agent_rpc_test.py
LINES: 1
SIZE: 14.12 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\rpc\faulty_agent_rpc_test.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import torch
import time
import torch.distributed.rpc as rpc
from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs
from torch.testing._internal.dist_utils import (
dist_init,
wait_until_pending_futures_and_users_flushed,
wait_until_owners_and_forks_on_rank,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
def my_sleep_func(seconds=1):
time.sleep(seconds)
return torch.mul(torch.tensor(1), torch.tensor(1))
@torch.jit.script
def my_script_func(tensor):
return torch.add(tensor, tensor)
def add_rref_to_value(rref, value):
return rref.to_here() + value
class FaultyAgentRpcTest(RpcAgentTestFixture):
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
@dist_init(messages_to_delay={})
def test_check_failed_messages(self):
if self.rank == 0:
dst_worker_b = worker_name((self.rank + 1) % self.world_size)
dst_worker_c = worker_name((self.rank + 2) % self.world_size)
# Worker0 sends RPC to Worker1 and creates an RRef there
rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2)))
# Worker0 sends an RPC to Worker2 with the RRef as an arg
rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2)))
# check if the output is as expected
self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2)))
# explicitly delete all User RRefs
_delete_all_user_and_unforked_owner_rrefs()
@dist_init
def test_verify_backend_options(self):
self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2)
self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"])
def test_custom_faulty_messages(self):
self.assertEqual(
{"RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"},
set(self.rpc_backend_options.messages_to_fail),
)
@dist_init(faulty_messages=[])
def test_no_faulty_messages(self):
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0)
@dist_init(messages_to_delay={"SCRIPT_CALL": 1.5})
def test_custom_messages_to_delay(self):
self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5})
def _test_remote_message_dropped_pickle(self, dst=None):
if self.rank != 0:
return
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = f"worker{dst_rank}"
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, my_sleep_func, args=(1,))
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
        # Attempting to fork the RRef should raise an error indicating the rpc.remote timeout.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref._serialize()
# Test that using RRef as arg over RPC (which forks) results in the same
# error
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1))
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle(self):
self._test_remote_message_dropped_pickle()
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle_to_self(self):
self._test_remote_message_dropped_pickle(self.rank)
def _test_remote_message_dropped_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# test the case where rpc.remote() message creation is completely dropped.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = f"worker{dst_rank}"
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, func, args=args)
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Note: during shutdown, logs will indicate "Could not find OwnerRRef..."
        # on the owning nodes; this is expected because the OwnerRRef was never
# successfully created. Therefore, delAllUsers will work as expected.
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args, dst=0)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout_to_self(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args, dst=0)
def _test_remote_message_delay_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# Test the case where remote message is eventually processed on the owner,
# but the future on the creator times out before the response comes back.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = f"worker{dst_rank}"
        # 1 ms timeout
rref = rpc.remote(dst_worker, func, args=args, timeout=0.001)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# to_here() should now pick up that rpc.remote() creation has failed.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Test the case where rpc.remote() times out, but to_here() has already
# started blocking before.
# NOTE: we only test this when not sending to self, as to_here() calls
        # localValue(), which does not send an RPC and thus does not have
# a timeout. This can be supported by allowing future.wait() to
# take in an optional timeout (https://github.com/pytorch/pytorch/issues/39280)
if dst_rank != self.rank:
slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2)
with self.assertRaisesRegex(RuntimeError, expected_error):
# to_here() should raise timeout error, since it does not know about the
# status of rpc.remote().
slow_rref.to_here(0.001)
        # Note: If we proceed with shutdown, UserRRef will send out an RRefUserDelete
# but this can be a noop since it may not exist on the owner yet. Later,
# the owner can process the RRef creation and wait for the delete message,
# thus leading to a timeout.
# Therefore, we wait until we get notification that pending owners have
# been confirmed before sending out RRefUserDeletes.
if dst_rank != self.rank:
wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_delay_timeout(func, args)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout_to_self(self):
func = my_sleep_func
args = (1,)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout_to_self(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
def test_rref_to_here_timeout(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = f"worker{dst_rank}"
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref.to_here(0.01)
rref.to_here()
@dist_init(faulty_messages=[])
def test_rpc_builtin_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
        # SCRIPT_CALL message types, which correspond to builtin/TorchScript
        # calls over RPC, get a delay by default (see faulty_rpc_agent_test_fixture)
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(
dst_worker,
torch.add,
args=(torch.tensor(1), torch.tensor(1)),
timeout=1,
)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_rpc_script_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
```
|
================================================================================================================================================================
SOURCE CODE FILE: faulty_rpc_agent_test_fixture.py
LINES: 1
SIZE: 2.23 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\rpc\faulty_rpc_agent_test_fixture.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import torch.distributed.rpc as rpc
import torch.distributed.rpc._testing # noqa: F401
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
# The following message types are currently retried in the RREF protocol and
# distributed autograd. Thus only these messages should be tested with the
# Faulty RPC Agent.
retryable_message_types = ["RREF_FORK_REQUEST",
"RREF_CHILD_ACCEPT",
"RREF_USER_DELETE",
"CLEANUP_AUTOGRAD_CONTEXT_REQ"]
# The following messages incur the corresponding delay in seconds while being
# processed in FaultyTensorPipeAgent's enqueueSend() function.
default_messages_to_delay = {
"PYTHON_CALL": 1.5, # Python UDF
"SCRIPT_CALL": 1.5, # Script/Builtin
}
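# Hedged usage note (added for illustration; `test_something` is a hypothetical
# name): individual tests usually narrow these defaults through the dist_init
# decorator, for example
#
#     @dist_init(faulty_messages=["RREF_FORK_REQUEST"],
#                messages_to_delay={"SCRIPT_CALL": 1.5})
#     def test_something(self):
#         ...
#
# and setup_fault_injection below copies any overrides into messages_to_fail /
# messages_to_delay, which the rpc_backend_options property then picks up.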
class FaultyRpcAgentTestFixture(RpcAgentTestFixture):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.messages_to_fail = retryable_message_types
self.messages_to_delay = default_messages_to_delay
@property
def rpc_backend(self):
return rpc.backend_registry.BackendType[
"FAULTY_TENSORPIPE"
]
@property
def rpc_backend_options(self):
return rpc.backend_registry.construct_rpc_backend_options(
self.rpc_backend,
init_method=self.init_method,
num_worker_threads=8,
num_fail_sends=3,
messages_to_fail=self.messages_to_fail,
messages_to_delay=self.messages_to_delay,
)
def setup_fault_injection(self, faulty_messages, messages_to_delay):
if faulty_messages is not None:
self.messages_to_fail = faulty_messages
if messages_to_delay is not None:
self.messages_to_delay = messages_to_delay
def get_shutdown_error_regex(self):
error_regexes = [
"Exception in thread pool task",
"Connection reset by peer",
"Connection closed by peer"
]
return "|".join([f"({error_str})" for error_str in error_regexes])
def get_timeout_error_regex(self):
return "RPC ran for more than"
```
|
===============================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\rpc\jit\__init__.py
ENCODING: utf-8
```py
```
|
=========================================================================================================================================================
SOURCE CODE FILE: dist_autograd_test.py
LINES: 1
SIZE: 4.19 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\rpc\jit\dist_autograd_test.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import torch
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
from torch import Tensor
from torch.distributed.rpc import rpc_async
from torch.testing import FileCheck
from torch.testing._internal.dist_utils import dist_init, worker_name
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
@torch.jit.script
def local_add(t1, t2):
return torch.add(t1, t2)
@torch.jit.script
def remote_add(t1, t2, dst: str): # noqa: E999
return rpc_async(dst, local_add, (t1, t2)).wait()
@torch.jit.script
def fork_add(t1, t2, dst: str):
fut = torch.jit._fork(remote_add, t1, t2, dst)
return torch.jit._wait(fut)
class JitDistAutogradTest(RpcAgentTestFixture):
@dist_init
def test_get_gradients(self):
@torch.jit.script
def dist_get_gradients(context_id: int) -> (dict[Tensor, Tensor]):
return dist_autograd.get_gradients(context_id)
FileCheck().check("get_gradients").run(str(dist_get_gradients.graph))
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.add(t1, t2)
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(torch.ones(3, 3), grads[t1])
self.assertEqual(torch.ones(3, 3), grads[t2])
@dist_init
def test_dist_backward(self):
if self.rank != 0:
return
@torch.jit.script
def dist_backward_script(context_id: int, loss: torch.Tensor):
dist_autograd.backward(context_id, [loss])
FileCheck().check("dist_backward").run(str(dist_backward_script.graph))
with dist_autograd.context() as context_id:
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(3, 3, requires_grad=True)
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
loss = rpc.rpc_sync(dst_worker_name, torch.add, args=(t1, t2)).sum()
dist_backward_script(context_id, loss)
@dist_init
def test_jit_fork_within_context(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
res = fork_add(t1, t2, dst_worker_name)
loss = res.sum()
dist_autograd.backward(context_id, [loss])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
@dist_init
    def test_restore_context_after_switch_to_jit_thread(self):
if self.rank != 0:
return
@torch.jit.script
def forward_script(
context_id: int, dst_worker_name: str, t1: Tensor, t2: Tensor
) -> tuple[Tensor, Tensor]:
res1_fut = rpc.rpc_async(dst_worker_name, local_add, (t1, t1))
res1 = res1_fut.wait() # After this, the script runs in a new JIT thread.
loss1 = res1.sum()
# SendRpcBackward is not attached, since DistAutogradContext is lost here.
res2_fut = rpc.rpc_async(dst_worker_name, local_add, (t2, t2))
res2 = res2_fut.wait()
loss2 = res2.sum()
return loss1, loss2
with dist_autograd.context() as context_id:
t1 = torch.ones((2, 3), requires_grad=True)
t2 = torch.ones((2, 3), requires_grad=True)
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
loss0, loss1 = forward_script(context_id, dst_worker_name, t1, t2)
dist_autograd.backward(context_id, [loss0, loss1])
grad0, grad1 = dist_autograd.get_gradients(context_id)
self.assertEqual(grad0, grad1)
```
|
===============================================================================================================================================
SOURCE CODE FILE: rpc_test.py
LINES: 1
SIZE: 47.25 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\rpc\jit\rpc_test.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import time
import io
from typing import Any
import torch
import torch.distributed as dist
import torch.distributed.rpc as rpc
from torch import Tensor
from torch.autograd.profiler import record_function
from torch.distributed.rpc import RRef
from torch.distributed.rpc.internal import RPCExecMode, _build_rpc_profiling_key
from torch.futures import Future
from torch.testing._internal.common_utils import TemporaryFileName
from torch.testing._internal.dist_utils import (
dist_init,
get_function_event,
initialize_pg,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.autograd.profiler_legacy import profile as _profile
def rref_isinstance(rref, cls_to_check):
return isinstance(rref.local_value(), cls_to_check)
def sleep(t):
time.sleep(t)
def rpc_return_rref(dst):
return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
@torch.jit.script
def rref_local_value(rref: RRef[Tensor]) -> Tensor:
return rref.local_value()
@torch.jit.script
def list_create() -> list[int]:
global_list = [1, 2, 3]
return global_list
@torch.jit.script
def rref_list_mutate(rref: RRef[list[int]]) -> None:
rref.local_value().append(4)
rref.to_here().append(5)
rref.to_here(5.0).append(6)
def return_value(value: int) -> int:
return value
class RRefAPITest:
@dist_init
def test_rref_is_owner(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
rref_var = rpc_return_rref(dst_worker_name)
@torch.jit.script
def rref_tensor_is_owner(rref_var: RRef[Tensor]) -> bool:
return rref_var.is_owner()
res = rref_tensor_is_owner(rref_var)
self.assertEqual(res, False)
@dist_init
def test_rref_local_value(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
rref = rpc_return_rref(dst_worker_name)
with self.assertRaisesRegex(
RuntimeError, r"Can't call RRef.local_value\(\) on a non-owner RRef"
):
rref_local_value(rref)
        ret = rpc.rpc_sync(dst_worker_name, rref_local_value, (rref,))
self.assertEqual(ret, torch.add(torch.ones(2, 2), 1))
@dist_init
def test_local_rref_local_value(self):
if self.rank != 0:
return
dst_worker_name = worker_name(self.rank)
rref = rpc.remote(dst_worker_name, return_value, (5,), {})
ret = rref_local_value(rref)
self.assertEqual(ret, 5)
def _create_rref(self):
owner_rank = (self.rank + 2) % self.world_size
return rpc.remote(
worker_name(owner_rank), torch.add, args=(torch.zeros(2, 2), 1)
)
@dist_init
def test_user_rrefs_confirmed(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret = rpc.rpc_sync(
worker_name(dst_rank), script_check_rref_confirmed, args=(rref,)
)
self.assertEqual(ret, True)
@dist_init
def test_user_rrefs_confirmed_remote(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret_rref = rpc.remote(
worker_name(dst_rank), script_check_rref_confirmed, args=(rref,)
)
self.assertEqual(ret_rref.to_here(), True)
@dist_init
def test_rref_list_mutate(self):
dst = worker_name((self.rank + 1) % self.world_size)
list_rref = rpc.remote(dst, list_create)
rpc.rpc_sync(dst, rref_list_mutate, args=(list_rref,))
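        # rref_list_mutate executes on the owner of list_rref, so
        # local_value() and to_here() (with or without a timeout) all operate
        # on the owner's list, appending 4, 5, and 6 in place.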
self.assertEqual(list_rref.to_here(), [1, 2, 3, 4, 5, 6])
@torch.jit.script
def no_arg():
return 0
@torch.jit.script
def one_arg(value):
return value + 1
@torch.jit.script
def script_add_ones(x):
return torch.add(x, torch.ones(1))
@torch.jit.script
def script_add_ones_with_record_function(x, block: str):
with record_function(block):
return torch.add(x, torch.ones(1))
@torch.jit.script
def record_function_on_caller_rpc_async(dst_worker_name: str, block: str) -> Tensor:
t: Tensor = torch.ones(1)
with record_function(block):
fut1 = rpc.rpc_async(dst_worker_name, script_add_ones, (t, ))
# Extra operator call to avoid de-duplication of the next async call
# see https://github.com/pytorch/pytorch/pull/62710#discussion_r694680279
zero = torch.zeros_like(t)
fut2 = rpc.rpc_async(dst_worker_name, script_add_ones, (t, ))
res = fut1.wait() + fut2.wait() + zero
return res
@torch.jit.script
def script_fork_wait_udf(tensor):
fut = torch.jit._fork(script_add_ones, tensor)
x = torch.jit._wait(fut)
return x
@torch.jit.script
def rref_to_here(rref_var: RRef[Tensor]) -> Tensor:
return rref_var.to_here()
@torch.jit.script
def return_rref(rref_var: RRef[Tensor]) -> RRef[Tensor]:
return rref_var
@torch.jit.script
def script_raise_func(value):
if value.numel() == 2:
raise ValueError("Expected error")
return value + 1
@torch.jit.script
def script_fork_wait_throw(invalue):
fut = torch.jit._fork(script_raise_func, invalue)
value = torch.jit._wait(fut)
return value
@torch.jit.script
def call_rpc_with_profiling(record: torch.classes.profiler._RecordFunction, dst_worker_name: str) -> Tensor:
    # Call rpc_async from within a ScriptFunction and ensure that we can attach
    # profiling callbacks. Note that `record` here is a handle to the profiler
    # RecordFunction custom class.
fut = rpc.rpc_async(dst_worker_name, one_arg, (torch.tensor(1),))
torch.ops.profiler._call_end_callbacks_on_jit_fut(record, fut)
ret = fut.wait()
return ret
@torch.jit.script
def call_rpc_torchscript_with_record_function(dst_worker_name: str, block: str) -> Tensor:
fut = rpc.rpc_async(dst_worker_name, script_add_ones_with_record_function, (torch.tensor(1), block))
return fut.wait()
@torch.jit.script
def call_fork_with_profiling(record: torch.classes.profiler._RecordFunction) -> Tensor:
    # Call fork from within a ScriptFunction and ensure that we can attach
    # profiling callbacks to the resulting future. Note that `record` here is a
    # handle to the profiler RecordFunction custom class.
fut = torch.jit._fork(one_arg, torch.tensor(1))
torch.ops.profiler._call_end_callbacks_on_jit_fut(record, fut)
ret = fut.wait()
return ret
class MyScriptModuleWithRRefs(torch.jit.ScriptModule):
def __init__(self, dst_worker):
super().__init__()
self.rrefs = []
for _ in range(4):
self.rrefs.append(rpc_return_rref(dst_worker))
@torch.jit.script_method
def forward(self) -> Tensor:
res_tensor = torch.ones(2, 2)
for rref in self.rrefs:
res_tensor += rref.to_here()
return res_tensor
@torch.jit.ignore
def rref_python_annotation(rref_var: RRef[Tensor]) -> RRef[Tensor]:
return rref_var
@torch.jit.script
def rref_script_annotation(rref_var: RRef[Tensor]) -> Tensor:
return rref_python_annotation(rref_var).to_here()
class RRefTypingTest:
@dist_init
def test_rref_as_arg_and_return(self):
n = self.rank + 1
dst_rank = n % self.world_size
local_ret = one_arg(torch.ones(2, 2))
# create rref on current rank
rref = rpc.remote(worker_name(self.rank), one_arg, args=(torch.ones(2, 2),))
# pass rref to another user in rpc call
ret = rpc.rpc_sync(worker_name(dst_rank), rref_to_here, args=(rref,))
self.assertEqual(ret, local_ret)
# return rref in rpc call
rref1 = rpc.rpc_sync(worker_name(dst_rank), return_rref, args=(rref,))
self.assertEqual(rref1.to_here(), local_ret)
# pass rref to another user in remote call
rref2 = rpc.remote(worker_name(dst_rank), rref_to_here, args=(rref,))
self.assertEqual(rref2.to_here(), local_ret)
# return rref in remote call
rref3 = rpc.remote(worker_name(dst_rank), return_rref, args=(rref,))
self.assertEqual(rref3.to_here().to_here(), local_ret)
@dist_init
def test_my_script_module_with_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
module_with_rrefs = MyScriptModuleWithRRefs(worker_name(dst_rank))
res = module_with_rrefs()
self.assertEqual(res, torch.ones(2, 2) * 9)
@dist_init
def test_rref_python_annotation(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_var = rpc_return_rref(worker_name(dst_rank))
res = rref_script_annotation(rref_var)
self.assertEqual(res, torch.ones(2, 2) + 1)
class FutureTypingTest:
@dist_init
def test_future_passed_between_python_and_jit(self):
dst_rank = (self.rank + 1) % self.world_size
inputs = (torch.tensor([1, 1]), torch.tensor([2, 2]))
ret_fut = rpc.rpc_async(worker_name(dst_rank), two_args_two_kwargs, args=inputs)
expected_res = torch.tensor([10, 10])
@torch.jit.script
def future_wait_in_script(fut: Future[Tensor]) -> Tensor:
return fut.wait()
self.assertEqual(future_wait_in_script(ret_fut), expected_res)
@torch.jit.script
def future_return_to_python(
dst_rank: int, inputs: tuple[Tensor, Tensor]
) -> Future[Tensor]:
return rpc.rpc_async(
f"worker{dst_rank}", two_args_two_kwargs, inputs
)
fut_res = future_return_to_python(dst_rank, inputs)
self.assertEqual(fut_res.wait(), expected_res)
@dist_init
def test_future_python_annotation(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
input_0 = torch.ones(2, 2)
input_1 = 1
expected_res = torch.add(input_0, input_1)
@torch.jit.ignore
def python_return_future() -> Future[Tensor]:
fut = rpc.rpc_async(dst_worker_name, torch.add, (input_0, input_1), {})
return fut
@torch.jit.script
def script_use_future() -> Tensor:
fut = python_return_future()
return fut.wait()
res = script_use_future()
self.assertEqual(res, expected_res)
@torch.jit.script
class MyScriptClass:
def __init__(self, a: int):
self.a = a
def get_value(self) -> int:
return self.a
@torch.jit.interface
class MyModuleInterface(torch.nn.Module):
def forward(self) -> Tensor:
# pyre-ignore[7]: Pyre and torch.jit.interface don't mix well
pass
class MyScriptModule(torch.jit.ScriptModule):
def __init__(self, rank):
super().__init__()
self.a = torch.ones(rank)
@torch.jit.script_method
def forward(self) -> Tensor:
return self.a
@torch.jit.script_method
def custom_func(self) -> Tensor:
return self.a
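# Owner-side helpers that wrap a script class or script module instance in an
# RRef so it can be fetched and used from TorchScript on another worker.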
def owner_create_rref_my_script_class(a):
return rpc.RRef(MyScriptClass(a))
def owner_create_rref_my_script_module(a):
return rpc.RRef(MyScriptModule(a), type_hint=MyModuleInterface)
@torch.jit.script
def script_rref_get_value_my_script_class(rref: RRef[MyScriptClass]) -> int:
return rref.to_here().get_value()
@torch.jit.script
def script_rref_run_forward_my_script_module(rref: RRef[MyModuleInterface]) -> Tensor:
return rref.to_here().forward()
class LocalRRefTest:
@dist_init
def test_create_local_script_class_rref_in_py(self):
if self.rank != 0:
return
# Create a local RRef<MyScriptClass>.
rref_script_class = rpc.RRef(MyScriptClass(self.rank))
ret = rref_script_class.to_here().get_value()
self.assertEqual(ret, self.rank)
@dist_init
def test_create_local_script_module_rref_in_py(self):
if self.rank != 0:
return
# Create a local RRef<MyModuleInterface>.
rref_script_module = rpc.RRef(MyScriptModule(self.rank), MyModuleInterface)
ret = rref_script_module.to_here().forward()
self.assertEqual(ret, torch.ones(self.rank))
# Create a local RRef<MyModuleInterface> without type hint.
with self.assertRaisesRegex(
RuntimeError,
(
"The RRef being created contains a ScriptModule, "
"must provide its ModuleInterface type hint."
),
):
rref_script_module = rpc.RRef(MyScriptModule(self.rank))
@dist_init
def test_return_local_script_class_rref_in_py_and_use_in_script(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
# Create a local RRef<MyScriptClass> remotely in Python.
rref = rpc.rpc_sync(
dst_worker_name, owner_create_rref_my_script_class, args=(self.rank,)
)
def use_rref_on_owner(rref: RRef[MyScriptClass]) -> int:
args = (rref,)
kwargs: dict[str, Any] = {}
fut = rpc.rpc_async(
rref.owner(), script_rref_get_value_my_script_class, args, kwargs
)
ret = fut.wait()
return ret
# Use RRef<MyScriptClass> in local Python RPC and remote Script run.
ret = use_rref_on_owner(rref)
self.assertEqual(ret, self.rank)
# Use RRef<MyScriptClass> in local Script RPC and remote Script run.
use_rref_on_owner_script = torch.jit.script(use_rref_on_owner)
ret = use_rref_on_owner_script(rref)
self.assertEqual(ret, self.rank)
@dist_init
def test_return_local_script_module_rref_in_py_and_use_in_script(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
# Create a local RRef<MyModuleInterface> remotely in Python.
rref = rpc.rpc_sync(
dst_worker_name, owner_create_rref_my_script_module, args=(self.rank,)
)
def use_rref_on_owner(rref: RRef[MyModuleInterface]) -> Tensor:
args = (rref,)
kwargs: dict[str, Any] = {}
fut = rpc.rpc_async(
rref.owner_name(),
script_rref_run_forward_my_script_module,
args,
kwargs,
)
ret = fut.wait()
return ret
# Use RRef<MyScriptClass> in local Python RPC and remote Script run.
ret = use_rref_on_owner(rref)
self.assertEqual(ret, torch.ones(self.rank))
# Use RRef<MyScriptClass> in local Script RPC and remote Script run.
use_rref_on_owner_script = torch.jit.script(use_rref_on_owner)
ret = use_rref_on_owner_script(rref)
self.assertEqual(ret, torch.ones(self.rank))
def python_function():
return 0
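# Script function with two required positional args and two tensor kwargs with
# defaults; the JitRpcOpTest cases below exercise how kwargs are populated.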
@torch.jit.script
def two_args_two_kwargs(
first_arg,
second_arg,
first_kwarg=torch.tensor([3, 3]),
second_kwarg=torch.tensor([4, 4]),
):
return first_arg + second_arg + first_kwarg + second_kwarg
@torch.jit.script
def assorted_types_args_kwargs(
tensor_arg: Tensor, # noqa: E999
str_arg: str,
int_arg: int,
tensor_kwarg: Tensor = torch.tensor([2, 2]),
str_kwarg: str = "str_kwarg",
int_kwarg: int = 2,
):
return tensor_arg + tensor_kwarg, str_arg + str_kwarg, int_arg + int_kwarg
@torch.jit.script
def raise_script():
raise RuntimeError("Expected error")
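# The three helpers below call two_args_two_kwargs on a remote worker from
# TorchScript via rpc_async, rpc_sync, and rpc.remote respectively.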
@torch.jit.script
def script_rpc_async_call(
dst_worker_name: str, args: tuple[Tensor, Tensor], kwargs: dict[str, Tensor]
):
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
ret = fut.wait()
return ret
@torch.jit.script
def script_rpc_sync_call(
dst_worker_name: str, args: tuple[Tensor, Tensor], kwargs: dict[str, Tensor]
):
res = rpc.rpc_sync(dst_worker_name, two_args_two_kwargs, args, kwargs)
return res
@torch.jit.script
def script_rpc_remote_call(
dst_worker_name: str, args: tuple[Tensor, Tensor], kwargs: dict[str, Tensor]
):
rref_res = rpc.remote(dst_worker_name, two_args_two_kwargs, args, kwargs)
return rref_res.to_here()
class JitRpcOpTest:
# Call functions remotely from Script.
@dist_init
def test_all_kwargs_are_populated_by_defaults(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {}
for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]:
ret = script_op(
dst_worker_name, args, kwargs
)
self.assertEqual(ret, torch.tensor([10, 10]))
@dist_init
def test_some_kwargs_are_populated_by_defaults(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {"first_kwarg": torch.tensor([2, 2])}
for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]:
ret = script_op(
dst_worker_name, args, kwargs
)
self.assertEqual(ret, torch.tensor([9, 9]))
@dist_init
def test_no_kwargs_are_populated_by_defaults(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {
"first_kwarg": torch.tensor([2, 2]),
"second_kwarg": torch.tensor([3, 3]),
}
for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]:
ret = script_op(
dst_worker_name, args, kwargs
)
self.assertEqual(ret, torch.tensor([8, 8]))
@dist_init
def test_args_and_kwargs_contain_different_types(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
@torch.jit.script
def script_rpc_async_call_with_assorted_types(
dst_worker_name: str,
):
args = (torch.tensor([1, 1]), "str_arg", 1)
# Must annotate the value type as `Any`, because JIT type inference
# does not support multiple types when defining a Dict.
# The error JIT gives is,
# "Dict values must contain only a single type, "
# "expected: Tensor but found str instead."
kwargs: dict[str, Any] = {
"tensor_kwarg": torch.tensor([3, 3]),
"str_kwarg": "_str_kwarg",
"int_kwarg": 3,
}
fut = rpc.rpc_async(
dst_worker_name, assorted_types_args_kwargs, args, kwargs
)
ret = fut.wait()
return ret
ret = script_rpc_async_call_with_assorted_types(
dst_worker_name
)
self.assertEqual(ret, (torch.tensor([4, 4]), "str_arg_str_kwarg", 4))
@dist_init
def test_kwargs_not_passed(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
@torch.jit.script
def script_rpc_async_call_without_kwargs_passed(
dst_worker_name: str,
):
args = ()
fut = rpc.rpc_async(dst_worker_name, no_arg, args)
ret = fut.wait()
return ret
ret = script_rpc_async_call_without_kwargs_passed(
dst_worker_name
)
self.assertEqual(ret, 0)
@dist_init
def test_args_kwargs_are_neither_passed(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
@torch.jit.script
def script_rpc_async_call_without_args_kwargs_passed(
dst_worker_name: str,
):
fut = rpc.rpc_async(dst_worker_name, no_arg)
ret = fut.wait()
return ret
ret = script_rpc_async_call_without_args_kwargs_passed(
dst_worker_name
)
self.assertEqual(ret, 0)
@dist_init
def test_less_than_needed_args_are_specified(self):
if self.rank != 0:
return
# Notice, args matching happens during scripting.
with self.assertRaisesRegex(RuntimeError, "Argument second_arg not provided"):
@torch.jit.script
def script_rpc_async_call_with_less_args(
dst_worker_name: str, # noqa: E999
):
args = (torch.tensor([1, 1]),)
kwargs = {}
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
ret = fut.wait()
return ret
@dist_init
def test_more_than_needed_args_are_specified(self):
if self.rank != 0:
return
# Notice, args matching happens during scripting.
with self.assertRaisesRegex(
RuntimeError,
"Expected at most 4 arguments but found 5 positional arguments",
):
@torch.jit.script
def script_rpc_async_call_with_more_args(
dst_worker_name: str,
):
args = (
torch.tensor([1, 1]),
torch.tensor([2, 2]),
torch.tensor([3, 3]),
torch.tensor([4, 4]),
torch.tensor([5, 5]),
)
kwargs = {}
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
ret = fut.wait()
return ret
@dist_init
    def test_unexpected_kwarg_is_specified(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
# Notice, kwargs matching happens during execution.
@torch.jit.script
def script_rpc_async_call_with_unexpected_kwarg(
dst_worker_name: str, # noqa: E999
):
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {"third_kwarg": torch.tensor([1, 1])}
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
ret = fut.wait()
return ret
with self.assertRaisesRegex(
RuntimeError, "Unknown keyword argument 'third_kwarg'"
):
ret = script_rpc_async_call_with_unexpected_kwarg(
dst_worker_name
)
self.assertEqual(ret, 0)
@dist_init
def test_call_python_function_remotely_from_script_not_supported(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
@torch.jit.script
def rpc_async_call_remote_py_function_in_torchscript(dst_worker_name: str):
args = ()
kwargs = {}
fut = rpc.rpc_async(dst_worker_name, python_function, args, kwargs)
ret = fut.wait()
return ret
with self.assertRaisesRegex(
RuntimeError, "attempted to get undefined function"
):
ret = rpc_async_call_remote_py_function_in_torchscript(dst_worker_name)
self.assertEqual(ret, 0)
@dist_init
def test_call_script_function_that_raises_remotely_from_script(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
        # Note: TorchScript always translates (emits) a Python `raise` statement
        # as a generic exception carrying the message string, no matter what
        # exception type and message appear in the original statement.
@torch.jit.script
def rpc_async_call_remote_raising_torchscript_in_torchscript(
dst_worker_name: str,
):
args = ()
kwargs = {}
fut = rpc.rpc_async(dst_worker_name, raise_script, args, kwargs)
ret = fut.wait()
return ret
with self.assertRaisesRegex(RuntimeError, "Expected error"):
ret = rpc_async_call_remote_raising_torchscript_in_torchscript(
dst_worker_name
)
self.assertEqual(ret, 0)
@dist_init
def test_call_script_function_that_not_exists_remotely_from_script(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
@torch.jit.script
def nonexisting_script():
return 0
@torch.jit.script
def rpc_async_call_remote_nonexisting_torchscript_in_torchscript(
dst_worker_name: str,
):
args = ()
kwargs = {}
fut = rpc.rpc_async(dst_worker_name, nonexisting_script, args, kwargs)
ret = fut.wait()
return ret
with self.assertRaisesRegex(
RuntimeError, "attempted to get undefined function nonexisting_script"
):
ret = rpc_async_call_remote_nonexisting_torchscript_in_torchscript(
dst_worker_name
)
self.assertEqual(ret, 0)
@torch.jit.ignore
def my_script_module_init(rank: int) -> MyModuleInterface:
return MyScriptModule(rank)
@torch.jit.script
def construct_my_script_module(rank: int) -> MyModuleInterface:
return my_script_module_init(rank)
@torch.jit.script
def run_ref_script_module(
ref_script_module: RRef[MyModuleInterface], t: Tensor
) -> Tensor:
module = ref_script_module.to_here()
return module.forward() + t
@torch.jit.script
def script_check_rref_confirmed(rref: RRef[Tensor]) -> bool:
return rref.confirmed_by_owner()
@torch.jit.script
def save_rref(rref_var: RRef[Tensor], fname: str) -> None:
torch.save(rref_var, fname)
@torch.jit.script
def script_add(x: Tensor, y: Tensor) -> Tensor:
return x + y
@rpc.functions.async_execution
@torch.jit.script
def async_add(to: str, x: Tensor, y: Tensor) -> Future[Tensor]:
return rpc.rpc_async(to, script_add, (x, y))
@rpc.functions.async_execution
@torch.jit.script
def async_wrong_type() -> Tensor:
return torch.zeros(2)
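# Deserializes a ScriptModule that was saved with a pickled RRef inside and
# runs its forward pass on the callee.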
def load_script_module_with_pickled_rref(pickled_script_module):
f = io.BytesIO(pickled_script_module)
m = torch.jit.load(f)
return m()
class JitRpcTest(
RRefAPITest,
RRefTypingTest,
LocalRRefTest,
JitRpcOpTest,
FutureTypingTest,
RpcAgentTestFixture,
):
@dist_init
def test_torchscript_function(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
local_ret = one_arg(torch.ones(2, 2))
ret = rpc.rpc_sync(dst_worker_name, one_arg, args=(torch.ones(2, 2),))
self.assertEqual(ret, local_ret)
rref = rpc.remote(dst_worker_name, one_arg, args=(torch.ones(2, 2),))
self.assertEqual(rref.to_here(), local_ret)
# create rref to itself
local_rref = rpc.remote(
worker_name(self.rank), one_arg, args=(torch.ones(2, 2),)
)
self.assertEqual(local_rref.to_here(), local_ret)
@dist_init
def test_torchscript_function_exception(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(RuntimeError, r"one_arg\(\) expected at most"):
rpc.rpc_sync(dst_worker_name, one_arg, args=(10, 20))
with self.assertRaisesRegex(RuntimeError, r"one_arg\(\) expected at most"):
rpc.remote(dst_worker_name, one_arg, args=(10, 20))
@dist_init
def test_torchscript_functions_not_supported(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
my_local_script_module = MyScriptModule(self.rank)
        # It is not thread safe to instantiate MyScriptModule from multiple
        # threads, so wait for the local MyScriptModule instantiation to finish;
        # otherwise the server thread below could end up instantiating
        # MyScriptModule in parallel with this one.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
# rpc_sync still accepts script class and run it in
# the same code path as python call.
rpc.rpc_sync(dst_worker_name, MyScriptClass, args=(self.rank,))
        # rpc_sync does not accept script module methods.
        # Python 3.5 and Python 3.6 throw different error messages; the only
        # word common to both that can be grepped for is "pickle".
with self.assertRaisesRegex(TypeError, "pickle"):
rpc.rpc_async(
dst_worker_name, my_local_script_module.forward, args=()
)
@dist_init
def test_remote_script_module(self):
        # TODO: needs more investigation.
        # There is an RRef leak when shutting down; the suspicion is that the
        # RRef passed as an argument crosses the pybind boundary and is not
        # garbage collected by Python when calling shutdown().
import torch.distributed.rpc.api as api
api._ignore_rref_leak = True
local_ret = torch.ones(self.rank) + torch.ones(self.rank)
n = self.rank + 1
dst_rank = n % self.world_size
remote_ref = rpc.remote(
worker_name(dst_rank), construct_my_script_module, args=(self.rank,)
)
# pass rref arg to owner
ret = rpc.rpc_sync(
worker_name(dst_rank),
run_ref_script_module,
args=(remote_ref, torch.ones(self.rank)),
)
self.assertEqual(ret, local_ret)
# pass rref arg to self/user
with self.assertRaisesRegex(
RuntimeError,
"is an RRef to a ScriptModule. It can't be sent through RPC from owner,",
):
ret = rpc.rpc_sync(
worker_name(self.rank),
run_ref_script_module,
args=(remote_ref, torch.ones(self.rank)),
)
@dist_init
def test_create_script_module_on_remote(self):
dst_name = worker_name((self.rank + 1) % self.world_size)
# Construct on remote end with rpc_sync
created_script_module = rpc.rpc_sync(
dst_name, MyScriptModule, args=(self.rank,)
)
# Forward should output a ones tensor of self.rank.
self.assertTrue(isinstance(created_script_module, torch.jit.ScriptModule))
rank_ones_tensor = created_script_module()
self.assertEqual(torch.ones(self.rank), rank_ones_tensor)
# Construct ScriptModule with rpc.remote.
remote_script_module = rpc.remote(dst_name, MyScriptModule, args=(self.rank,))
# Verify it is an instance of ScriptModule on remote end.
remote_end_is_script = rpc.rpc_sync(
remote_script_module.owner(),
rref_isinstance,
args=(remote_script_module, torch.jit.ScriptModule),
)
self.assertTrue(remote_end_is_script)
# Run forward pass remotely.
remote_forward_output = remote_script_module.rpc_sync().forward()
self.assertEqual(remote_forward_output, torch.ones(self.rank))
# Run function defined on ScriptModule remotely.
remote_func_output = remote_script_module.rpc_sync().custom_func()
self.assertEqual(remote_func_output, torch.ones(self.rank))
# Ensure we can transfer ScriptModule RRef to this rank and run
# forward pass.
local_script_module = remote_script_module.to_here()
self.assertTrue(isinstance(local_script_module, torch.jit.ScriptModule))
rank_ones_tensor = local_script_module()
self.assertEqual(rank_ones_tensor, torch.ones(self.rank))
local_script_func_output = local_script_module.custom_func()
self.assertEqual(local_script_func_output, torch.ones(self.rank))
@dist_init
def test_load_script_module_with_pickled_rref(self):
dst_name = worker_name((self.rank + 1) % self.world_size)
m1 = MyScriptModuleWithRRefs(dst_name)
m2 = MyScriptModuleWithRRefs(dst_name)
f = io.BytesIO()
rpc._enable_jit_rref_pickle()
torch.jit.save(m1, f)
rpc._disable_jit_rref_pickle()
out1 = rpc.rpc_sync(
dst_name,
load_script_module_with_pickled_rref,
args=(f.getvalue(),)
)
out2 = m2()
self.assertEqual(out1, out2)
@dist_init
def test_rref_jit_pickle_not_supported(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_var = rpc_return_rref(worker_name(dst_rank))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(
RuntimeError, "RRef jit pickling is only allowed inside RPC calls"
):
save_rref(rref_var, fname)
@dist_init
def test_remote_script_throw(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
script_raise_func,
args=(torch.ones(2),),
)
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
rref.to_here()
@dist_init
def test_remote_script_udf(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
script_fork_wait_udf,
args=(torch.ones(2),),
)
self.assertEqual(rref.to_here(), torch.ones(2) * 2)
@dist_init
def test_async_script_udf(self):
future = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
script_fork_wait_udf,
args=(torch.ones(2),),
)
self.assertEqual(future.wait(), torch.ones(2) * 2)
@dist_init
def test_callback_simple(self):
def callback(fut):
return fut.wait() + 1
future = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
script_fork_wait_udf,
args=(torch.ones(2),),
).then(callback)
self.assertEqual(future.wait(), torch.ones(2) * 2 + 1)
@dist_init
def test_callback_chain(self):
n = self.rank + 1
def callback(fut):
return fut.wait() + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size), one_arg, args=(torch.ones(n, n),)
)
num_cbs = 20
for _ in range(num_cbs):
fut = fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
@dist_init
def test_add_done_callback(self):
callback_called = None
def callback(fut):
nonlocal callback_called
callback_called = fut.wait() * 2
future = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
script_fork_wait_udf,
args=(torch.ones(2),),
)
future.add_done_callback(callback)
future_then = future.then(lambda _: True)
self.assertEqual(future.wait(), torch.ones(2) * 2)
        # We have no guarantee that the add_done_callback fn will execute before the test finishes.
        # Adding a 'then' callback that runs afterwards guarantees that we wait for the first callback.
future_then.wait()
self.assertEqual(callback_called, torch.ones(2) * 4)
@dist_init
def test_async_script_throw(self):
future = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
script_fork_wait_throw,
args=(torch.ones(2),),
)
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
future.wait()
@dist_init
def test_callback_with_exception(self):
def callback(fut):
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
fut.wait()
raise RuntimeError("Another expected error")
future = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
script_fork_wait_throw,
args=(torch.ones(2),),
).then(callback)
with self.assertRaisesRegex(RuntimeError, "Another expected error"):
future.wait()
@dist_init
def test_call_rpc_with_profiling(self):
# Ensures that we can call torch.ops.profiler._call_end_callbacks_on_jit_fut on a jit
# future from within a script function that calls rpc_async
if self.rank == 0:
with _profile() as prof:
prof_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
torch._jit_internal._qualified_name(one_arg),
"worker0",
"worker1",
)
with torch.autograd.profiler.record_function(prof_key) as rf:
call_rpc_with_profiling(rf.record, "worker1")
# TODO: Can't get a reliable time for this profiling event since
# it's hard to estimate the execution time on the remote end for non-UDFs.
# This can be resolved by https://github.com/pytorch/pytorch/issues/36272.
# After that, this test should be modified to validate the function time.
events = prof.function_events
function_event = get_function_event(events, prof_key)
self.assertTrue(torch._jit_internal._qualified_name(one_arg) in function_event.name)
@dist_init
def test_rpc_async_jit_profiled(self):
# Tests that rpc_async calls made from within a TorchScript function are
# profiled.
if self.rank == 0:
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {}
with _profile() as prof:
script_rpc_async_call(
dst_worker_name, args, kwargs
)
# Ensure rpc_async call is profiled
function_events = prof.function_events
qual_name = torch._jit_internal._qualified_name(two_args_two_kwargs)
rpc_async_jit_event = [
event
for event in function_events
if qual_name in event.name and event.node_id == self.rank
]
self.assertEqual(len(rpc_async_jit_event), 1)
rpc_async_jit_event = rpc_async_jit_event[0]
profiled_name = _build_rpc_profiling_key(
RPCExecMode.ASYNC_JIT,
qual_name,
worker_name(self.rank),
dst_worker_name,
)
self.assertEqual(profiled_name, rpc_async_jit_event.name)
remote_events = [event for event in function_events if event.is_remote]
# All remote events should have taken place on dst_rank
remote_event_node_ids = {
remote_event.node_id for remote_event in remote_events
}
self.assertEqual(remote_event_node_ids, {dst_rank})
# script_rpc_async_call invokes add operator
# so we should see this as a remote event.
remote_add = next(
remote_event
for remote_event in remote_events
if "aten::add" in remote_event.name
)
remote_add_profiled_name = f"{profiled_name}#remote_op: aten::add"
self.assertEqual(remote_add.name, remote_add_profiled_name)
@dist_init
def test_record_function_on_caller_rpc_async(self):
if self.rank == 0:
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
block_scope = "foo"
with _profile() as prof:
# Runs 2 rpc_async calls within JIT under record_function.
record_function_on_caller_rpc_async(dst_worker_name, block_scope)
# Ensure record_function event is profiled.
function_events = prof.function_events
record_function_scope_event = [
event for event in function_events if event.name == block_scope
]
self.assertEqual(1, len(record_function_scope_event))
record_function_scope_event = record_function_scope_event[0]
# Ensure RPC future is profiled.
expected_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC_JIT,
torch._jit_internal._qualified_name(script_add_ones),
worker_name(self.rank),
dst_worker_name,
)
jit_rpc_events = [
event for event in function_events if event.name == expected_key
]
self.assertEqual(2, len(jit_rpc_events))
# Validate that the record_function scope time is greater than both
# of the individual RPC async call times. The reason it is not necessarily
# greater than the sum is because the two can execute in parallel.
for jit_rpc_event in jit_rpc_events:
self.assertTrue(
record_function_scope_event.cpu_time_total
> jit_rpc_event.cpu_time_total
)
@dist_init
def test_rpc_torchscript_record_function(self):
        # Tests that TorchScript functions can be profiled over RPC using
        # `with record_function(...)`.
REMOTE_OP_STR = "#remote_op: "
if self.rank == 0:
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
block_scope = "foo"
with _profile() as prof:
call_rpc_torchscript_with_record_function(dst_worker_name, block_scope)
# Need to call below to populate CPU children.
prof.key_averages()
function_events = prof.function_events
expected_key = (
_build_rpc_profiling_key(
RPCExecMode.ASYNC_JIT,
torch._jit_internal._qualified_name(
script_add_ones_with_record_function
),
worker_name(self.rank),
dst_worker_name,
)
+ REMOTE_OP_STR
+ block_scope
)
remote_record_function_event = next(
evt for evt in function_events if evt.name == expected_key
)
self.assertTrue(block_scope in remote_record_function_event.name)
remote_children = remote_record_function_event.cpu_children
            self.assertTrue(any("aten::add" in child.name for child in remote_children))
def test_record_function_jit_end_callbacks_with_fork(self):
# Ensures that we can call rf._call_end_callbacks_on_future on a jit
# future in python eager mode with torch.jit.fork
sleep_interval = 1
with _profile() as prof:
with torch.autograd.profiler.record_function("foo") as rf:
fut = torch.jit._fork(sleep, sleep_interval)
rf._call_end_callbacks_on_future(fut)
fut.wait()
function_events = prof.function_events
sleep_event = get_function_event(function_events, "foo")
self.assertEqual(sleep_event.name, "foo")
# Validate that callbacks were fired at the right time by checking the
# profiling event cpu time
self.assertGreaterAlmostEqual(sleep_event.cpu_time * 1e-6, sleep_interval)
def test_call_fork_in_jit_with_profiling(self):
# Ensures that we can call torch.ops.profiler._call_end_callbacks_on_jit_fut on a jit
# future from within a script function with torch.jit.fork
with _profile() as prof:
with torch.autograd.profiler.record_function("foo") as rf:
call_fork_with_profiling(rf.record)
events = prof.function_events
function_event = get_function_event(events, "foo")
self.assertEqual(function_event.name, "foo")
@dist_init
def test_async_function_simple(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2))
)
self.assertEqual(ret, torch.ones(2, 2) + 1)
@dist_init
def test_async_function_wrong_return_type(self):
with self.assertRaisesRegex(
RuntimeError,
"Async functions must return an IValue of Future type, but got Tensor",
):
rpc.rpc_sync(
worker_name((self.rank + 1) % self.world_size), async_wrong_type
)
@dist_init
def test_async_function_wrong_decorator_order(self):
# @torch.jit.script complains about undefined value rpc. Error is shown
# below. The reason for not checking error string is to avoid making
# JIT error handling code depend on RPC tests, as we don't have any
# restrictions on the error message here.
#
# RuntimeError:
# undefined value rpc:
# def async_wrong_decorator_order(to, x, y):
# # type: (str, Tensor, Tensor) -> Future[Tensor]
# return rpc.rpc_async(to, script_add, (x, y))
# ~~~ <--- HERE
with self.assertRaises(RuntimeError):
@torch.jit.script
@rpc.functions.async_execution
def async_wrong_decorator_order(
to: str, x: Tensor, y: Tensor
) -> Future[Tensor]:
return rpc.rpc_async(to, script_add, (x, y))
@dist_init
def test_async_function_remote(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
rref = rpc.remote(
dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2))
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)
@dist_init
def test_async_function_remote_multi(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
num = 20
rrefs = [
rpc.remote(
dst1,
async_add,
args=(dst2, torch.ones(2, 2), torch.ones(2, 2) * i)
) for i in range(num)
]
for i in range(num):
self.assertEqual(rrefs[i].to_here(), torch.ones(2, 2) + i)
@dist_init
def test_async_function_wrong_return_type_remote(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size), async_wrong_type
)
with self.assertRaisesRegex(
RuntimeError,
"Async functions must return an IValue of Future type, but got Tensor",
):
rref.to_here()
```
|
======================================================================================================================================================
SOURCE CODE FILE: rpc_test_faulty.py
LINES: 1
SIZE: 8.03 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\rpc\jit\rpc_test_faulty.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import torch
import torch.distributed.rpc as rpc
from torch import Tensor
from torch.distributed.rpc import RRef
from torch.testing._internal.dist_utils import (
dist_init,
worker_name,
wait_until_pending_futures_and_users_flushed
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
@torch.jit.script
def two_args_two_kwargs(
first_arg,
second_arg,
first_kwarg=torch.tensor([3, 3]),
second_kwarg=torch.tensor([4, 4]),
):
return first_arg + second_arg + first_kwarg + second_kwarg
@torch.jit.script
def script_rpc_async_call(
dst_worker_name: str, args: tuple[Tensor, Tensor], kwargs: dict[str, Tensor]
):
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
ret = fut.wait()
return ret
@torch.jit.script
def rpc_async_call_with_timeout(
dst_worker_name: str,
args: tuple[Tensor, Tensor],
kwargs: dict[str, Tensor],
timeout: float,
):
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs, timeout)
ret = fut.wait()
return ret
@torch.jit.script
def rpc_async_call_with_timeout_future_ret(
dst_worker_name: str,
args: tuple[Tensor, Tensor],
kwargs: dict[str, Tensor],
timeout: float,
):
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs, timeout)
return fut
@torch.jit.script
def rpc_async_call_future_ret(
dst_worker_name: str, args: tuple[Tensor, Tensor], kwargs: dict[str, Tensor]
):
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
return fut
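# TorchScript helpers that fetch an RRef's value (optionally with an explicit
# timeout) or pass an RRef as an argument to a nested rpc_async call.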
@torch.jit.script
def rref_to_here(rref_var: RRef[Tensor]) -> Tensor:
return rref_var.to_here()
@torch.jit.script
def rref_to_here_with_timeout(rref_var: RRef[Tensor], timeout: float) -> Tensor:
return rref_var.to_here(timeout)
@torch.jit.script
def rpc_async_with_rref_arg(dst_worker_name: str, args: tuple[RRef[Tensor]]) -> Tensor:
fut = rpc.rpc_async(dst_worker_name, rref_to_here, args)
ret = fut.wait()
return ret
class JitFaultyAgentRpcTest(RpcAgentTestFixture):
"""
Run tests for rpc_async in JIT under the faulty agent test fixture to test
arbitrary timeouts.
"""
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_timeout_in_torchscript_function(self):
# Call rpc_async + fut.wait() in torchscript function and ensure that
# timeout is raised.
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {
"first_kwarg": torch.tensor([2, 2]),
"second_kwarg": torch.tensor([3, 3]),
}
expected_error = self.get_timeout_error_regex()
# Ensure that we get a timeout if we override the default timeout and
# the RPC takes longer to execute.
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc_async_call_with_timeout(dst_worker_name, args, kwargs, 0.5)
# Ensure that we timeout if we don't specify a timeout but the default
# is less than the RPC takes to execute.
rpc._set_rpc_timeout(0.001)
with self.assertRaisesRegex(RuntimeError, expected_error):
script_rpc_async_call(
dst_worker_name, args, kwargs
)
# Ensure that we run to completion if zero timeout is specified.
ret = rpc_async_call_with_timeout(dst_worker_name, args, kwargs, 0)
self.assertEqual(ret, torch.tensor([8, 8]))
# reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_timeout_in_python(self):
# Ensures timeouts are raised if we call rpc_async from within a
# torchscript function, but wait on the future in python.
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {
"first_kwarg": torch.tensor([2, 2]),
"second_kwarg": torch.tensor([3, 3]),
}
expected_error = self.get_timeout_error_regex()
fut = rpc_async_call_with_timeout_future_ret(dst_worker_name, args, kwargs, 0.5)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure timeout if we don't specify but the default is less than the
# RPC takes to execute.
rpc._set_rpc_timeout(0.001)
fut = rpc_async_call_future_ret(dst_worker_name, args, kwargs)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if zero timeout is specified
fut = rpc_async_call_with_timeout_future_ret(dst_worker_name, args, kwargs, 0)
result = fut.wait()
self.assertEqual(result, torch.tensor([8, 8]))
# reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_remote_timeout_to_here_in_jit(self):
        # Test that calling to_here() in JIT will raise a timeout error if
        # rpc.remote failed.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = f"worker{dst_rank}"
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
# Will ensure error handling callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Call to_here() within a ScriptFunction and ensure it raises
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref_to_here(rref)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
def test_rref_to_here_timeout_in_jit(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = f"worker{dst_rank}"
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref_to_here_with_timeout(rref, 0.01)
rref_to_here_with_timeout(rref, 100)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_rref_timeout_pickle_in_jit(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = f"worker{dst_rank}"
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
# Will ensure error handling callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Call RPC with RRef arg in JIT, which will go through JIT pickling and
# ensure error is raised.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc_async_with_rref_arg(dst_worker, (rref, ))
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_rref_timeout_pickle_script_func(self):
        # Similar to the above test, but calls a Python RPC with a script function.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = f"worker{dst_rank}"
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
# Will ensure error handling callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Call RPC with script function that takes RRef, ensure timeout during pickling
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc.rpc_sync(dst_worker, rref_to_here, args=(rref, ))
```
|
=========================================================================================================================================================
SOURCE CODE FILE: rpc_agent_test_fixture.py
LINES: 1
SIZE: 1.89 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\rpc\rpc_agent_test_fixture.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import os
from abc import ABC, abstractmethod
import torch.testing._internal.dist_utils
class RpcAgentTestFixture(ABC):
@property
def world_size(self) -> int:
return 4
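    # When RPC_INIT_WITH_TCP=1 is set, initialize RPC over TCP using
    # MASTER_ADDR/MASTER_PORT; otherwise fall back to the file-based init method.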
@property
def init_method(self):
use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None)
if use_tcp_init == "1":
master_addr = os.environ["MASTER_ADDR"]
master_port = os.environ["MASTER_PORT"]
return f"tcp://{master_addr}:{master_port}"
else:
return self.file_init_method
@property
def file_init_method(self):
return torch.testing._internal.dist_utils.INIT_METHOD_TEMPLATE.format(
file_name=self.file_name
)
@property
@abstractmethod
def rpc_backend(self):
pass
@property
@abstractmethod
def rpc_backend_options(self):
pass
def setup_fault_injection(self, faulty_messages, messages_to_delay): # noqa: B027
"""Method used by dist_init to prepare the faulty agent.
Does nothing for other agents.
"""
# Shutdown sequence is not well defined, so we may see any of the following
# errors when running tests that simulate errors via a shutdown on the
# remote end.
@abstractmethod
def get_shutdown_error_regex(self):
"""
        Return various error messages we may see from RPC agents while running
tests that check for failures. This function is used to match against
possible errors to ensure failures were raised properly.
"""
@abstractmethod
def get_timeout_error_regex(self):
"""
Returns a partial string indicating the error we should receive when an
RPC has timed out. Useful for use with assertRaisesRegex() to ensure we
have the right errors during timeout.
"""
```
|
===========================================================================================================================================
SOURCE CODE FILE: rpc_test.py
LINES: 4
SIZE: 229.04 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\rpc\rpc_test.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import concurrent.futures
import contextlib
import json
import os
import sys
import threading
import time
from collections import namedtuple
from functools import partial
from threading import Event
from threading import Lock
from unittest import mock
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torch.distributed.autograd as dist_autograd
from torch.distributed.rpc import RRef, _get_debug_info, _rref_context_get_debug_info, WorkerInfo
from torch.distributed.rpc.api import _use_rpc_pickler, _thread_local_var, _wait_all
from torch.distributed.rpc.internal import (
PythonUDF,
RPCExecMode,
_internal_rpc_pickler,
_build_rpc_profiling_key,
)
from torch.futures import Future
from torch.testing._internal.common_distributed import (
skip_if_lt_x_gpu,
captured_output,
tp_transports,
)
from torch.testing._internal.common_utils import (
IS_MACOS,
load_tests,
skip_but_pass_in_sandcastle_if,
get_cycles_per_ms,
)
from torch.testing._internal.dist_utils import (
dist_init,
get_function_event,
initialize_pg,
wait_until_node_failure,
wait_until_pending_futures_and_users_flushed,
wait_until_owners_and_forks_on_rank,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_utils import TemporaryFileName
from torch.autograd.profiler_legacy import profile as _profile
import operator
def foo_add():
return torch.add(torch.ones(1), torch.ones(1))
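# Runs a short chain of tensor ops, optionally on a given CUDA device and
# optionally inside a record_function block; the profiling tests compare the
# resulting profiler events against EXPECTED_REMOTE_EVENTS below.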
def udf_with_torch_ops(device=-1, use_record_function=False):
device_ctx = contextlib.nullcontext() if device == -1 else torch.cuda.device(device)
record_function_ctx = (
torch.autograd.profiler.record_function("##forward##")
if use_record_function
else contextlib.nullcontext()
)
with device_ctx, record_function_ctx:
t1, t2 = torch.ones(1), torch.ones(1)
t = torch.add(t1, t2)
t = torch.mul(t, t)
t = t.relu()
t = t.sigmoid()
# Events (operator invocations) that are expected to run as part of the above
# function.
EXPECTED_REMOTE_EVENTS = [
"aten::ones",
"aten::ones",
"aten::add",
"aten::mul",
"aten::relu",
"aten::clamp_min",
"aten::sigmoid",
]
# Remote operations are prefixed with the following string for RPC profiling.
REMOTE_OP_STR = "#remote_op: "
VALUE_FUTURE = concurrent.futures.Future()
DONE_FUTURE = concurrent.futures.Future()
FIFTY_MIL_CYCLES = 50000000
_rpc_barrier_count = 0
def _increment_count():
global _rpc_barrier_count
_rpc_barrier_count += 1
def _reset_count():
global _rpc_barrier_count
_rpc_barrier_count = 0
class StubRpcAgent:
def __init__(self, world_size):
self.world_size = world_size
def get_worker_infos(self):
return {
WorkerInfo(name=worker_name(rank), id=rank)
for rank in range(self.world_size)
}
def _stub_construct_rpc_backend_options_handler(**kwargs):
return mock.Mock() # RpcBackendOptions.
def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options):
return StubRpcAgent(world_size=world_size)
def set_value(value):
VALUE_FUTURE.set_result(value)
def wait_for_value_future():
return VALUE_FUTURE.result()
def set_and_check_done(value):
VALUE_FUTURE.set_result(value)
return DONE_FUTURE.result()
# The classes and functions below are used to test Python user-defined
# functions, classes, and methods over RPC.
TensorClass = namedtuple("TensorClass", ["tensors"])
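# Implements __getstate__/__setstate__ by round-tripping a PythonUDF through
# the internal RPC pickler; used to test nested pickling over RPC.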
class MyPickleClass:
def __init__(self) -> None:
self.t = None
def __getstate__(self):
(pickled_python_udf, tensors) = _internal_rpc_pickler.serialize(
PythonUDF(my_tensor_function, (torch.ones(2, 2), torch.ones(2, 2)), None)
)
return (pickled_python_udf, tensors)
def __setstate__(self, obj):
python_udf = _internal_rpc_pickler.deserialize(obj[0], obj[1])
result = python_udf.func(python_udf.args[0], python_udf.args[1])
self.t = result
def set(self, val):
self.t = val
class SlowPickleClass:
def __init__(self, t):
self.t = t
def __getstate__(self):
time.sleep(self.t)
return (self.t, )
def __setstate__(self, obj):
self.t = obj[0]
time.sleep(self.t)
class MyClass:
def __init__(self, a, delay=False):
self.a = a
# delay initialization to simulate errors if specified
if delay:
time.sleep(2)
def my_instance_method(self, b):
return self.a + b
@classmethod
def my_class_method(cls, d, e):
return d + e
@staticmethod
def my_static_method(f):
return f > 10
def increment_value(self, increment):
self.a += increment
def get_value(self):
return self.a
def my_slow_method(self, my_tensor_arg):
time.sleep(5)
return torch.add(self.a, my_tensor_arg)
def _call_method_on_rref(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs)
def get_rref_list(values):
return [RRef(MyClass(a)) for a in values]
def add_rref_to_value(rref, value):
return rref.to_here() + value
def run_nested_pickle(pickle_cls_instance, tensor):
return pickle_cls_instance.t + tensor
def build_sparse_tensor(coalesce=False):
i = [[0, 1, 1], [2, 0, 2]]
v = [3, 4, 5]
tensor = torch.sparse_coo_tensor(i, v, (2, 3))
if coalesce:
tensor = tensor.coalesce()
return tensor
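# Builds nested lists and a dict keyed by a tensor to exercise serialization
# of complex tensor containers over RPC.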
def build_complex_tensors():
a = torch.ones(3, 3)
b = [a, a]
c = [b, b]
d = [a, b]
e = {a: d}
return [a, b, c, d, e]
def non_cont_test(t_view, t_cont):
if t_view.is_contiguous():
raise Exception('t_view is contiguous!') # noqa: TRY002
if not t_cont.is_contiguous():
raise Exception('t_cont is not contiguous!') # noqa: TRY002
if not torch.equal(t_view, t_cont):
raise Exception('t_view is not equal to t_cont!') # noqa: TRY002
return t_view
def my_function(a, b, c):
return a + b + c
def my_tensor_function(a, b):
return a + b
def my_container_sum(a):
result = a[0]
for tensor in a[1:]:
result += tensor
return result
def my_sleep_func(seconds=1):
time.sleep(seconds)
return torch.mul(torch.tensor(1), torch.tensor(1))
def my_complex_tensor_function(list_input, tensor_class_input, dict_input):
res = list_input[0]
for t in list_input:
res += t
for v in dict_input.values():
res += v
complex_tensors = tensor_class_input.tensors
return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2])
def my_rref_function(rref_a, rref_b):
return rref_a.to_here() + rref_b.to_here()
def delayed_add(a, b, seconds=0.05):
time.sleep(seconds)
return a + b
def identity(a):
return a
def no_result():
print("do nothing")
def raise_or_inc(value):
if value.numel() == 2:
raise ValueError("Expected error")
return value + 1
def nested_rpc(dst):
return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
def nested_rpc_sparse(dst):
return rpc.rpc_sync(
dst,
torch.add,
args=(build_sparse_tensor(), build_sparse_tensor())
)
def multi_layer_nested_async_rpc(dst, world_size, ttl):
# this method returns immediately without blocking the callee, but will
# generate additional requests.
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
rpc.rpc_async(
current_dst,
multi_layer_nested_async_rpc,
args=(next_dst, world_size, ttl - 1),
)
return 0
def nested_rref(dst):
return (
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)),
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)),
)
def nested_rref_sparse(dst):
return (
rpc.remote(
dst,
torch.add,
args=(build_sparse_tensor(), build_sparse_tensor())
),
rpc.remote(
dst,
torch.add,
args=(build_sparse_tensor(), build_sparse_tensor())
),
)
def nested_remote(dst):
rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3))
return rref.to_here()
def nested_remote_sparse(dst):
rref = rpc.remote(dst, torch.add, args=(build_sparse_tensor(), build_sparse_tensor()))
return rref.to_here()
def rref_forward_chain(dst, world_size, rref, ttl):
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
ret_rref = rpc.remote(
current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1)
)
return [ret_rref]
else:
return rref.to_here()
def rpc_return_rref(dst):
return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
def light_rpc():
return 0
def heavy_rpc(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
def heavy_rpc_sparse(tensor):
for i in range(1, 100):
tensor *= i
tensor = tensor / (i + 1)
return 0
@torch.jit.script
def heavy_rpc_torchscript(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def my_script_func(tensor):
return torch.add(tensor, tensor)
expected_err = "Expected error"
# Note that it needs to inherit from Exception, not BaseException. See comment
# in rpc/internal.py
class CustomException(Exception):
def __init__(self, bool, msg):
self.bool = bool
super().__init__(msg)
def raise_func():
raise ValueError(expected_err)
def custom_raise_func():
raise CustomException(True, "foo")
@torch.jit.script
def raise_func_script(expected_err: str) -> torch.Tensor:
raise ValueError(expected_err)
expected_err_escape = "\nFirst line of error \n next line of error \n last line of error"
def raise_func_escape():
raise ValueError(expected_err_escape)
global_rref = None
def set_global_rref(rref):
global global_rref
global_rref = rref
def clear_global_rref():
global global_rref
global_rref = None
def check_rref_confirmed(rref):
return rref.confirmed_by_owner()
def get_rref_debug_info():
return _rref_context_get_debug_info()
def add_use_future_cb(to, x, y, z):
out = concurrent.futures.Future()
def callback(fut):
out.set_result(fut.wait() + z)
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(callback)
return out.result()
def get_events_from_profile(profile_rref):
return profile_rref.local_value().process_global_function_events
def add_use_future_set_result(to, x, y, z):
out = torch.futures.Future()
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(lambda fut : out.set_result(fut.wait() + z))
return out.wait()
def add_use_future_nested_cb(to, x, y, z):
out = torch.futures.Future()
def callback(fut1):
fut2 = rpc.rpc_async(to, torch.add, args=(fut1.wait(), z))
fut2.then(lambda fut2 : out.set_result(fut2.wait()))
fut1 = rpc.rpc_async(to, torch.add, args=(x, y))
fut1.then(callback)
return out.wait()
def fail_on_fut(fut):
pass
@rpc.functions.async_execution
def async_raise_func():
raise RuntimeError("Expected error")
@rpc.functions.async_execution
def async_wrong_type():
return torch.zeros(2, 2)
@rpc.functions.async_execution
def async_add(to, x, y):
return rpc.rpc_async(to, torch.add, args=(x, y))
def slow_add(x, y, device="cpu"):
time.sleep(1)
x = x.to(device)
y = y.to(device)
return torch.add(x, y).cpu()
@rpc.functions.async_execution
def slow_async_add(to, x, y, device="cpu"):
return rpc.rpc_async(to, slow_add, args=(x, y, device))
@rpc.functions.async_execution
def async_add_with_future_ctor(to, x, y, z):
fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut1: fut.set_result(fut1.wait() + z)
)
return fut
@rpc.functions.async_execution
def async_add_chained(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_chained_multi(to, x, num, step):
fut = rpc.rpc_async(to, torch.add, args=(x, 0))
for _ in range(num):
fut = fut.then(lambda fut: fut.wait() + step)
return fut
@rpc.functions.async_execution
def async_add_nested(to, x, y, z):
return rpc.rpc_async(to, async_add, args=(to, x, y)).then(
lambda fut: fut.wait() + z
)
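# Fans out `num` rpc_async calls and manually aggregates their results into a
# single future, which is completed once every callback has fired.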
@rpc.functions.async_execution
def async_add_multi_fanout(to, x, num, step):
futs = []
for i in range(num):
if i == 0:
futs.append(rpc.rpc_async(to, torch.add, args=(x, step)))
else:
futs.append(rpc.rpc_async(to, torch.add, args=(0, step)))
# TODO: use torch.futures.collect_all
lock = Lock()
state = {"cnt": 0, "ret": torch.zeros_like(x)}
ret_future = torch.futures.Future()
def inc_and_set(fut):
with lock:
state["cnt"] += 1
state["ret"] += fut.wait()
if state["cnt"] >= len(futs):
ret_future.set_result(state["ret"])
for fut in futs:
fut.then(inc_and_set)
return ret_future
@rpc.functions.async_execution
def async_cuda_sleep_and_set_to_one(t):
device = t.device
original_stream = torch.cuda.current_stream(device)
new_stream = torch.cuda.Stream(device)
new_stream.wait_stream(original_stream)
with torch.cuda.stream(new_stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
t.fill_(1)
fut = Future(devices=[device])
fut.set_result(t)
return fut
@rpc.functions.async_execution
def async_cuda_nested_add(to, x, y, z):
def cb(fut):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
return fut.value() + z
return rpc.rpc_async(to, torch.add, args=(x, y)).then(cb)
# A custom Python class that contains a tensor, needed to see if we correctly
# use the Python pickler to extract tensors from non-IValue-convertible types.
class TensorWrapper:
__slots__ = ("tensor", "lock", "event", "thread")
def __init__(self, t):
self.tensor = t
# Add one non-picklable field, to ensure it's ignored/skipped.
self.lock = Lock()
self.event = torch.cuda.Event(enable_timing=True)
self.thread = threading.Thread()
self.thread.start()
def increase(self, v):
with self.lock:
self.tensor += v
def sum(self):
with self.lock:
self.event.record()
return self.tensor.sum()
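# Exercises @rpc.functions.async_execution on static, class, and bound
# instance methods.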
class AsyncExecutionClass:
@staticmethod
@rpc.functions.async_execution
def static_async_add(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@classmethod
@rpc.functions.async_execution
def class_async_add(cls, to, x, y, z):
ret_fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: ret_fut.set_result(fut.wait() + z)
)
return ret_fut
@rpc.functions.async_execution
def bound_async_add(self, to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
def return_future():
return torch.futures.Future()
class FooBackendOptions(rpc.RpcBackendOptions):
def __init__(self, init_method):
# Must call the __init__ of the superclass (and do so directly,
# without using super()) because... pybind.
rpc.RpcBackendOptions.__init__(self)
self.init_method = init_method
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
class MyEmbeddingBagModel(torch.nn.Module):
def __init__(self, sparse):
super().__init__()
self.eb = torch.nn.EmbeddingBag(
10,
10,
sparse=sparse
)
def forward(self, x):
return self.eb(x)
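# Toy parameter server: each trainer reports a tensor via the async average()
# method, and all pending futures are resolved with total / trainers once every
# trainer has reported for the current iteration.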
class MyParameterServer:
def __init__(self, trainers):
self.lock = Lock()
self.trainers = trainers
self.iteration = 0
self.updates = 0
self.futures = []
self.total = None
self.gradient = None
@staticmethod
def get_gradient(rref):
return rref.local_value().gradient
@staticmethod
@rpc.functions.async_execution
def average(rref, riteration, tensor):
self = rref.local_value()
fut = torch.futures.Future()
with self.lock:
if riteration > self.iteration:
self.iteration = riteration
self.updates = 0
self.futures.clear()
self.futures.append(fut)
if self.total is None:
self.total = tensor
else:
self.total += tensor
self.updates += 1
if self.trainers == self.updates:
self.gradient = self.total / float(self.trainers)
for fut in self.futures:
result = self.total / float(self.trainers)
fut.set_result(result)
return fut
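# Small CNN used by CUDA RPC tests; forward() accepts either a tensor or an
# RRef and injects an artificial delay on the current CUDA stream.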
class MyConvNetForMNIST(nn.Module):
def __init__(self, device):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(1, 16, 3, 1),
nn.ReLU(),
nn.Conv2d(16, 32, 3, 1),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Flatten(1),
nn.Linear(4608, 128),
nn.ReLU(),
nn.Linear(128, 10),
).to(device)
self.device = device
def forward(self, x, is_rref=False):
x = x.to_here() if is_rref else x
with torch.cuda.stream(torch.cuda.current_stream(self.device)):
# intentionally adding delay to current CUDA stream
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
return self.net(x)
def __getstate__(self):
# return an empty dict to avoid inspecting the model contents on the
# owner
return {}
class RpcTestCommon:
def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None):
if mode == RPCExecMode.SYNC:
return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs)
elif mode == RPCExecMode.ASYNC:
return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait()
elif mode == RPCExecMode.REMOTE:
return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here()
def _self_py_udf_remote(self, worker_info, x, y, z):
rref = rpc.remote(worker_info, my_function, args=(x, y, z))
self.assertEqual(rref.to_here(), x + y + z)
def _self_remote_rref_as_rpc_arg(self, dst, x, y, z):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(x, y, z))
fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, x))
ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, x + y))
self.assertEqual(ret, x + y + z + x + y)
self.assertEqual(fut.wait(), x + y + z + x)
def _self_remote_rref_as_remote_arg(self, dst, x, y, z):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(x, y, z))
ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, x))
self.assertEqual(
ret_rref.to_here(), x + y + z + x
)
def _world_size_one(self, a, b):
if self.rank == 0:
rpc.init_rpc(
name="me",
backend=self.rpc_backend,
rank=0,
world_size=1,
rpc_backend_options=self.rpc_backend_options,
)
def _rpc_sync(x, y):
expect = x * 2
result = rpc.rpc_sync(
"me",
my_tensor_function,
args=(x, y)
)
self.assertEqual(expect, result)
def _rpc_async(x, y):
expect = x * 2
result = rpc.rpc_async(
"me",
my_tensor_function,
args=(x, y)
).wait()
self.assertEqual(expect, result)
def _remote(x, y):
expect = x * 2
result = rpc.remote(
"me",
my_tensor_function,
args=(x, y)
).to_here()
self.assertEqual(expect, result)
_rpc_sync(a, b)
_rpc_async(a, b)
_remote(a, b)
rpc.shutdown()
def _multi_rpc(self, sparse):
dst_rank = (self.rank + 1) % self.world_size
for i in range(20):
n = i + self.rank + 1
if sparse:
x = build_sparse_tensor() * n
y = build_sparse_tensor() * n
else:
x = torch.ones(2, 2)
y = torch.ones(2, 2)
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(x, y),
)
self.assertEqual(ret, x * 2)
def _run_uneven_workload(self, f, x, num_repeat=30):
# worker0 drives and waits for worker1 and worker2
# throughout the test.
if self.rank == 0:
self.assertTrue(self.world_size >= 3)
# Phase 1: Only worker1 has workload.
dst = "worker1"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, f, args=(x,))
futs.append(fut)
for fut in torch.futures.collect_all(futs).wait():
self.assertEqual(fut.wait(), 0)
# Phase 2: Only worker2 has workload.
# If join is not correctly implemented,
# worker2 should be closed by now.
dst = "worker2"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, f, args=(x,))
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
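    # Note the two completion idioms used above: torch.futures.collect_all()
    # returns a future that resolves to a list of futures (hence the inner
    # fut.wait()), while torch.futures.wait_all() blocks and returns the list
    # of unwrapped values directly.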
def _wait_all_workers(self, f, x):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=f"worker{self.rank:d}",
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload(f, x)
# worker0 calls this at the end after waiting for RPC responses.
        # worker1/2 call this immediately and still have some work after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
# Wait before proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
def _wait_all_workers_twice(self, f, x):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=f"worker{self.rank:d}",
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload(f, x)
# worker0 calls this at the end after waiting for RPC responses.
        # worker1/2 call this immediately and still have some work after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
rpc.api._wait_all_workers()
# Wait before proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
def _nested_rpc(self, f, expected):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
f,
args=(worker_name(self.rank),),
)
self.assertEqual(ret, expected)
def _stress_test_rpc(self, f, repeat=1000, args=()):
n = self.rank + 1
dst_rank = n % self.world_size
futs = []
tik = time.time()
for _ in range(repeat):
fut = rpc.rpc_async(worker_name(dst_rank), f, args=args)
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
tok = time.time()
print(
f"Rank {self.rank} finished testing {repeat} times in {tok - tik} seconds."
)
def _builtin_remote_ret(self, x, y, expected):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(x, y),
)
self.assertEqual(rref.to_here(), expected)
def _builtin_remote_self(self, x, y, expected):
rref = rpc.remote(
worker_name(self.rank),
torch.add,
args=(x, y),
)
self.assertEqual(rref.local_value(), expected)
def _test_multi_remote_call(self, fn, sparse, args_fn=lambda x, y: (), kwargs_fn=lambda x, y: {}):
m = 10
n = self.rank + 1
dst_rank = n % self.world_size
rrefs = []
expected = []
for i in range(m):
n = n + i
rrefs.append(
rpc.remote(
worker_name(dst_rank),
fn,
args=args_fn(n, sparse),
kwargs=kwargs_fn(n, sparse),
)
)
expected.append(fn(*args_fn(n, sparse), **kwargs_fn(n, sparse)))
for i in range(m):
self.assertEqual(rrefs[i].to_here(), expected[i])
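    # Typical (hypothetical) invocation, mirroring the dense tests further down
    # in RpcTest:
    #
    #   self._test_multi_remote_call(torch.add, False,
    #                                args_fn=RpcTest._multi_args_fn)
    #
    # where args_fn/kwargs_fn build the per-iteration arguments from (n, sparse).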
def _py_rref_args(self, a, b, x, y, expected):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(a, b)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(x, y)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), expected)
def _py_rref_args_user_share(self, a, b, c, x, y, z, expected):
n = self.rank + 1
owner_rank = n % self.world_size
user_rank = (n + 1) % self.world_size
rref_a = rpc.remote(
worker_name(owner_rank), my_function, args=(a, b, c)
)
rref_b = rpc.remote(
worker_name(owner_rank), my_function, args=(x, y, z)
)
rref_c = rpc.remote(
worker_name(user_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), expected)
def _py_rpc_rref_args(self, a, b, c, x, y, z, expected):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), my_function, args=(a, b, c)
)
rref_b = rpc.remote(
worker_name(dst_rank), my_function, args=(x, y, z)
)
c = rpc.rpc_sync(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(c, expected)
def _nested_remote(self, f, expected):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.remote(
worker_name(dst_rank1),
f,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), expected)
def _nested_rref(self, f, expected1, expected2):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref_of_rrefs = rpc.remote(
worker_name(dst_rank1),
f,
args=(worker_name(dst_rank2),),
)
# Say C has 2 OwnerRRefs.
# B has 2 UserRRefs to those 2 OwnerRRefs, respectively.
# This call is effectively A asking B to share its 2 UserRRefs.
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), expected1)
self.assertEqual(rrefs[1].to_here(), expected2)
def _nested_rref_stress(self, f, expected1, expected2):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
all_rrefs = [
rpc.remote(
worker_name(dst_rank1),
f,
args=(worker_name(dst_rank2),),
) for _ in range(20)
]
for i in range(20):
rref_of_rrefs = all_rrefs[i]
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), expected1)
self.assertEqual(rrefs[1].to_here(), expected2)
def _trainer_func(self, rref, sparse):
m = MyEmbeddingBagModel(sparse=sparse)
loss_fn = nn.MSELoss()
for i in range(10):
outputs = m(torch.rand(10, 10).long())
loss_fn(outputs, torch.rand(10, 10)).backward()
gradient = next(iter(m.parameters())).grad
fut = rref.rpc_async().average(rref, i, gradient)
gradient = fut.wait()
if gradient.is_sparse:
gradient = gradient.to_dense().double()
ps_gradient = rref.rpc_sync().get_gradient(rref)
if ps_gradient.is_sparse:
ps_gradient = ps_gradient.to_dense().double()
self.assertTrue(torch.equal(gradient, ps_gradient))
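    # The rref.rpc_async()/rref.rpc_sync() proxies used above invoke the named
    # method on the RRef's owner, so the trainers coordinate purely through the
    # parameter-server RRef; a minimal sketch:
    #
    #   fut = ps_rref.rpc_async().average(ps_rref, i, grad)   # non-blocking
    #   avg = fut.wait()
    #   cur = ps_rref.rpc_sync().get_gradient(ps_rref)        # blocking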
def _my_parameter_server(self, sparse):
ps_rref = RRef(MyParameterServer(self.world_size - 1))
futures = [
rpc.rpc_async(
worker_name((self.rank + index) % self.world_size),
self._trainer_func,
args=(
ps_rref,
sparse
),
) for index in range(1, self.world_size)]
torch.futures.wait_all(futures)
def _test_cuda_future_extraction(self, wrapper, unwrapper, sparse_tensor):
# We check proper CUDA stream synchronization by adding to the tensor
# in one stream to get the expected value, and reading it from another stream.
future = Future(devices=["cuda:0"])
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
another_stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
if sparse_tensor:
tensor = build_sparse_tensor().to("cuda:0")
add_tensor = build_sparse_tensor().to("cuda:0")
expected_tensor = (tensor + add_tensor).coalesce()
else:
tensor = torch.zeros((100,), device="cuda:0")
add_tensor = torch.ones((100,), device="cuda:0")
expected_tensor = tensor + add_tensor
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor += add_tensor
if sparse_tensor:
tensor = tensor.coalesce()
future.set_result(wrapper(tensor))
with torch.cuda.stream(another_stream):
tensor = unwrapper(future.wait())
if sparse_tensor:
self.assertTrue(torch.eq(tensor.indices(), expected_tensor.indices()).all().item())
self.assertTrue(torch.eq(tensor.values(), expected_tensor.values()).all().item())
self.assertEqual(tensor.size(), expected_tensor.size())
else:
self.assertTrue(torch.eq(tensor, expected_tensor).all().item())
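# For context, the CUDA-aware Future used in _test_cuda_future_extraction is
# constructed with the devices it may touch; wait() then synchronizes the
# caller's current stream with the stream that produced the value. A bare-bones
# sketch:
#
#   fut = Future(devices=["cuda:0"])
#   fut.set_result(torch.ones(2, device="cuda:0"))
#   out = fut.wait()  # safe to read on the caller's current stream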
class RpcTest(RpcAgentTestFixture, RpcTestCommon):
@dist_init
def test_worker_id(self):
n = self.rank + 1
peer_rank = n % self.world_size
self_worker_info = rpc.get_worker_info()
peer_worker_info = rpc.get_worker_info(worker_name(peer_rank))
self.assertEqual(self_worker_info.name, worker_name(self.rank))
self.assertEqual(peer_worker_info.name, worker_name(peer_rank))
with self.assertRaisesRegex(RuntimeError, "could not find destination"):
rpc.get_worker_info("WorkerUnknown")
@dist_init
def test_get_worker_infos(self):
worker_infos = rpc.api._get_current_rpc_agent().get_worker_infos()
worker_names = {worker_info.name for worker_info in worker_infos}
expected_worker_names = {
worker_name(rank) for rank in range(self.world_size)
}
self.assertEqual(worker_names, expected_worker_names)
worker_ids = {worker_info.id for worker_info in worker_infos}
expected_worker_ids = set(range(self.world_size))
self.assertEqual(worker_ids, expected_worker_ids)
@dist_init
def test_self_add(self):
self_worker_info = rpc.get_worker_info()
fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertEqual(ret, torch.ones(2, 2) + 1)
@dist_init
def test_send_to_rank(self):
dst_rank = (self.rank + 1) % self.world_size
# Test dense tensor
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test invalid ranks
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(self.world_size + 1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(-1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank + 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank - 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
@dist_init
def test_self_py_udf_remote(self):
self._self_py_udf_remote(
rpc.get_worker_info(),
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_rpc_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_rpc_arg(
dst,
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_self_rpc_arg(self):
self._self_remote_rref_as_rpc_arg(
rpc.get_worker_info(),
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_remote_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_remote_arg(
dst,
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_self_remote_arg(self):
self._self_remote_rref_as_remote_arg(
rpc.get_worker_info(),
torch.ones(2, 2),
1,
3
)
@dist_init
def test_rref_proxy_non_exist(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
msg = "has no attribute 'non_exist'"
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_sync().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_async().non_exist().wait()
with self.assertRaisesRegex(AttributeError, msg):
rref.remote().non_exist()
def _test_rref_proxy_tensor(self, dst):
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
expected = torch.ones(2, 2) + 1 + 3
self.assertEqual(expected.size(), rref.rpc_sync().size())
self.assertEqual(expected + 1, rref.rpc_async().add(1).wait())
self.assertEqual(expected.view(1, 4), rref.remote().view(1, 4).to_here())
@dist_init
def test_rref_proxy_tensor(self):
self._test_rref_proxy_tensor(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_tensor_self(self):
self._test_rref_proxy_tensor(rpc.get_worker_info())
@dist_init
def test_rref_proxy_reuse(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
my_function,
args=(torch.ones(2, 2), 1, 3)
)
expected = torch.ones(2, 2) + 1 + 3
proxy_rpc_sync = rref.rpc_sync()
proxy_rpc_async = rref.rpc_async()
proxy_remote = rref.remote()
self.assertEqual(expected.size(), proxy_rpc_sync.size())
self.assertEqual(expected + 1, proxy_rpc_sync.add(1))
self.assertEqual(expected.view(1, 4), proxy_rpc_sync.view(1, 4))
self.assertEqual(expected.size(), proxy_rpc_async.size().wait())
self.assertEqual(expected + 3, proxy_rpc_async.add(3).wait())
self.assertEqual(expected.view(4, 1), proxy_rpc_async.view(4, 1).wait())
self.assertEqual(expected.size(), proxy_remote.size().to_here())
self.assertEqual(expected + 5, proxy_remote.add(5).to_here())
self.assertEqual(expected.view(-1), proxy_remote.view(-1).to_here())
def _test_rref_proxy_class(self, dst):
rref = rpc.remote(dst, MyClass, args=(7,))
expected = MyClass(7)
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
expected.increment_value(3)
self.assertEqual(None, rref.rpc_sync().increment_value(1))
self.assertEqual(None, rref.rpc_async().increment_value(1).wait())
self.assertEqual(None, rref.remote().increment_value(1).to_here())
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
self.assertEqual(
expected.my_instance_method(2),
rref.rpc_sync().my_instance_method(2)
)
self.assertEqual(
expected.my_instance_method(3),
rref.rpc_async().my_instance_method(3).wait()
)
self.assertEqual(
expected.my_instance_method(4),
rref.remote().my_instance_method(4).to_here()
)
self.assertEqual(
expected.my_static_method(9),
rref.rpc_sync().my_static_method(9)
)
self.assertEqual(
expected.my_static_method(10),
rref.rpc_async().my_static_method(10).wait()
)
self.assertEqual(
expected.my_static_method(11),
rref.remote().my_static_method(11).to_here()
)
self.assertEqual(
expected.my_class_method(2, torch.zeros(2, 2)),
rref.rpc_sync().my_class_method(2, torch.zeros(2, 2))
)
self.assertEqual(
expected.my_class_method(2, torch.ones(3, 3)),
rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait()
)
self.assertEqual(
expected.my_class_method(2, torch.ones(4, 4)),
rref.remote().my_class_method(2, torch.ones(4, 4)).to_here()
)
@dist_init
def test_rref_proxy_class(self):
self._test_rref_proxy_class(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_class_self(self):
self._test_rref_proxy_class(rpc.get_worker_info())
@mock.patch.object(torch.distributed.autograd, "_init")
@mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent")
@dist_init(setup_rpc=False)
def test_register_rpc_backend_and_set_and_start_rpc_backend(
self, mock_rpc_agent, mock_dist_autograd_init
):
backend_name = "stub_backend"
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
with self.assertRaisesRegex(
RuntimeError, "^RPC backend .+: already registered$"
):
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
rpc.init_rpc(
name="worker1",
backend=backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
store, _, _ = next(
torch.distributed.rendezvous(
self.init_method, rank=self.rank, world_size=self.world_size
)
)
rpc._init_rpc_backend(
backend=self.rpc_backend,
store=store,
name="duplicate_name",
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name_2(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
rpc.init_rpc(
name=worker_name(self.rank % (self.world_size - 1)),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_reinit(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
# TODO: with TCP init, rank 0 raises Address already in use because
# rank 0 is the start daemon and the store is created before checking if
# RPC is already initialized in init_rpc.
if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0:
expected_reinit_err = "Address already in use"
else:
expected_reinit_err = "is already initialized"
with self.assertRaisesRegex(RuntimeError, expected_reinit_err):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_pg_init_no_rpc_init(self):
dist.init_process_group(
backend='gloo',
init_method=self.file_init_method,
rank=self.rank,
world_size=self.world_size)
class MyModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.lin = torch.nn.Linear(3, 4)
def forward(self, x):
return self.lin(x)
model = MyModel()
model.train()
model = torch.nn.parallel.DistributedDataParallel(model)
with self.assertRaisesRegex(RuntimeError, 'Current RPC agent is not set! Did you initialize the RPC framework'):
[RRef(param) for param in model.parameters()]
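    # In other words, a process group alone is not enough: creating RRefs (or
    # using any other RPC API) first requires rpc.init_rpc, roughly
    #
    #   rpc.init_rpc(name=f"worker{self.rank}", rank=self.rank,
    #                world_size=self.world_size)
    #
    # (backend options omitted in this sketch).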
def test_world_size_one(self):
self._world_size_one(
torch.ones(2, 2),
torch.ones(2, 2)
)
@dist_init(setup_rpc=False)
def test_invalid_names(self):
worker_id = 0
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
WorkerInfo("abc*", worker_id)
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
WorkerInfo(" ", worker_id)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
WorkerInfo("", worker_id)
# If the number in the message does not match, it is likely that the
# value of MAX_NAME_LEN in RPC WorkerInfo has changed.
with self.assertRaisesRegex(RuntimeError, "shorter than 128"):
WorkerInfo("".join(["a" for i in range(500)]), worker_id)
    # Test that WorkerInfo can be pickled and sent in an RPC call
@dist_init
def test_worker_info_pickle(self):
dst_rank = (self.rank + 1) % self.world_size
worker_info = rpc.api.get_worker_info()
ret = rpc.rpc_sync(worker_name(dst_rank), identity, args=(worker_info,))
self.assertEqual(ret, worker_info)
@dist_init
def test_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@staticmethod
def return_callee_id():
return rpc.get_worker_info().id
@dist_init
def test_int_callee(self):
dst_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(dst_rank, RpcTest.return_callee_id)
self.assertEqual(ret, dst_rank)
@dist_init
def test_add_with_id(self):
n = self.rank + 1
dst_rank = n % self.world_size
        worker_info = rpc.get_worker_info(worker_name(dst_rank))
        ret = rpc.rpc_sync(
            worker_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n))
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_scalar_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), n)
)
self.assertEqual(ret, (torch.ones(n, n) + n))
@dist_init
def test_async_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_nonzero(self):
n = self.rank + 1
dst_rank = n % self.world_size
x = torch.ones(self.world_size, self.world_size)
x[self.rank][self.rank] = 0
ret = rpc.rpc_sync(worker_name(dst_rank), torch.nonzero, args=(x,))
self.assertEqual(ret, x.nonzero())
@dist_init
def test_multi_rpc(self):
self._multi_rpc(False)
@dist_init
def test_future_wait_twice(self):
dst = worker_name((self.rank + 1) % self.world_size)
futs = [rpc.rpc_async(dst, raise_func) for _ in range(20)]
with self.assertRaisesRegex(ValueError, "Expected error"):
torch.futures.wait_all(futs)
for fut in futs:
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
@dist_init(setup_rpc=False)
def test_wait_all_workers_timeout(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
og_func = rpc.api._wait_all_workers
def wait_all_workers_sleep(timeout):
rpc.api._all_gather(SlowPickleClass(0.5), timeout=timeout)
rpc.api._wait_all_workers = wait_all_workers_sleep
try:
with self.assertRaisesRegex(RuntimeError, ''):
rpc.shutdown(graceful=True, timeout=0.01)
finally:
rpc.api._wait_all_workers = og_func
dist.barrier()
def test_wait_all_workers_dense(self):
self._wait_all_workers(heavy_rpc, torch.ones(100, 100))
def test_wait_all_workers_twice_dense(self):
self._wait_all_workers_twice(heavy_rpc, torch.ones(100, 100))
@dist_init
def test_all_gather(self):
info = rpc.get_worker_info()
results = rpc.api._all_gather(info.id)
expected = {}
for info in rpc._get_current_rpc_agent().get_worker_infos():
expected[info.name] = info.id
self.assertEqual(expected, results)
@dist_init
def test_all_gather_timeout(self):
rpc._set_rpc_timeout(0.1)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError,
"timed out in _all_gather after 0\\.10 seconds"
):
rpc.api._all_gather(SlowPickleClass(0.5))
else:
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.api._all_gather(SlowPickleClass(0.5))
def _test_barrier_helper(self, info, names, multi_threaded=False):
names = sorted(names)
leader = names[0]
rpc.rpc_sync(leader, _reset_count)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, 0)
rpc.api._barrier(names)
rpc.rpc_sync(leader, _increment_count)
rpc.api._barrier(names)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, len(names))
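    # rpc.api._barrier(names) blocks until every worker listed in `names` has
    # entered the barrier, so after the second barrier the leader is guaranteed
    # to have received all len(names) increments.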
@dist_init
def test_rpc_barrier_all(self):
# Test rpc barrier when called with full list of workers
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_subset(self):
# Test rpc barrier when processes are called with different subsets of the full list
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [worker.name for worker in all_worker_info if not worker.id % 2]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_partial_subset(self):
# Test rpc barrier when some processes are not involved in the barrier
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [f"worker{info.id}"]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_multithreaded(self):
        # This test validates the implementation of the barrier when multiple threads call into it
# We only need to check that it does not hang in this case
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
threads = []
for _ in range(3):
th = threading.Thread(target=self._test_barrier_helper, args=(info, names, True))
threads.append(th)
th.start()
for th in threads:
th.join()
@dist_init
def test_graceful_shutdown_with_uneven_workload(self):
"""Test graceful termination."""
self._run_uneven_workload(heavy_rpc, torch.ones(100, 100))
@dist_init(setup_rpc=False)
def test_shutdown_followed_by_rpc(self):
# Initialize RPC.
rpc.init_rpc(
name=f"worker{self.rank:d}",
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
rpc.shutdown()
with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"):
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
@dist_init
def test_expected_src(self):
dst_rank = (self.rank + 1) % self.world_size
expected_src_rank = (self.rank - 1) % self.world_size
rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,))
value = VALUE_FUTURE.result()
self.assertEqual(value, expected_src_rank)
@dist_init
def test_py_built_in(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2))
self.assertEqual(ret, min(n, n + 1, n + 2))
@dist_init
def test_py_user_defined(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(ret, my_function(n, n + 1, n + 2))
def test_build_rpc_profiling_key(self):
# Tests that the name that shows up as an Event in profiling RPCs has all
# the necessary information.
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
rpc_profiling_key = _build_rpc_profiling_key(
exec_mode, "foo", "worker0", "worker1"
)
self.assertIn(exec_mode.value, rpc_profiling_key)
self.assertIn("foo", rpc_profiling_key)
self.assertIn("worker0", rpc_profiling_key)
self.assertIn("worker1", rpc_profiling_key)
def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode):
self.assertTrue(self_worker_name in rpc_event.name)
self.assertTrue(dst_worker_name in rpc_event.name)
if isinstance(func, torch.jit.ScriptFunction):
self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name)
else:
self.assertTrue(func.__name__ in rpc_event.name)
self.assertTrue(rpc_exec_mode.value in rpc_event.name)
self.assertEqual(rpc_event.count, 1)
@dist_init
def test_profiler_rpc_record_shapes(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
t1, t2 = torch.ones(100), torch.ones(100)
with _profile(record_shapes=True) as prof:
rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2))
function_events = prof.function_events
remote_events = [event for event in function_events if event.is_remote]
remote_add_event = next(
event for event in remote_events if "aten::add" in event.name
)
remote_add_input_shapes = remote_add_event.input_shapes
# Run profiler on equivalent local op and validate shapes are the same.
with _profile(record_shapes=True) as prof:
torch.add(t1, t2)
local_function_events = prof.function_events
local_add_event = next(
event for event in local_function_events if "aten::add" in event.name
)
local_add_input_shapes = local_add_event.input_shapes
self.assertEqual(remote_add_input_shapes, local_add_input_shapes)
@dist_init
def test_profiler_rpc_memory(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with _profile(profile_memory=True) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
fut.wait()
function_events = p.function_events
event_cpu_mem_usages = {event.cpu_memory_usage for event in function_events}
        # If cpu_memory_usage was not propagated over the wire, this set would
        # only contain 0 (indicating that no memory was profiled).
self.assertNotEqual({0}, event_cpu_mem_usages)
# No memory profiled if profile_memory=False
with _profile(profile_memory=False) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
fut.wait()
function_events = p.function_events
event_cpu_mem_usages = {event.cpu_memory_usage for event in function_events}
self.assertEqual({0}, event_cpu_mem_usages)
@dist_init
def test_profiler_export_trace(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with _profile() as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
fut.wait()
with TemporaryFileName() as fname:
path = fname
p.export_chrome_trace(path)
with open(path) as f:
trace = json.load(f)
event_names = [event['name'] for event in trace]
for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]:
event_exists = any(expected_event_name in event_name for event_name in event_names)
self.assertTrue(event_exists)
@dist_init
def test_profiler_rpc_key_names(self):
# tests that remote events are properly prefixed with the RPC profiling key.
if self.rank != 1:
return
# Spawn multiple threads that send RPCs to ensure keys are correctly
# prefixed when there are multiple RPCs being created/in flight at the
# same time.
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
def rpc_with_profiling(dst_worker):
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
fut.wait()
events = prof.function_events
remote_event_names = {
event.name: event for event in events if event.is_remote
}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
dst_worker,
)
remote_event_name_set = set(EXPECTED_REMOTE_EVENTS)
for name, event in remote_event_names.items():
# Ensure that we have the expected key as part of the remote
# event.
self.assertTrue(name.startswith(rpc_profiling_key))
self.assertTrue(event.is_remote)
self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id)
# Ensure that the remote event name also contains the operator.
operator_name_substr = name[len(rpc_profiling_key) :]
                # Note: we don't assert that every remote event is in the above
                # set; the set is just a representative sample of what we expect
                # to see. The profiler can change and add more events, but we
                # should always expect to see this representative set.
matching_event = {
remote_event_name
for remote_event_name in remote_event_name_set
if remote_event_name in operator_name_substr
}
remote_event_name_set -= matching_event
            # The set should be empty; otherwise, some of its elements did not
            # show up in the remote profiler output.
self.assertTrue(
remote_event_name_set == set(),
f"Expected {remote_event_name_set} to be included in remote profiler output.",
)
for dst in dst_ranks:
dst_worker = worker_name(dst)
num_parallel_rpcs = 2
with concurrent.futures.ThreadPoolExecutor(
max_workers=num_parallel_rpcs
) as executor:
futs = [
executor.submit(rpc_with_profiling, dst_worker)
for _ in range(num_parallel_rpcs)
]
# Wait for workers to finish test
for fut in futs:
fut.result()
def _run_test_profiler_remote_events_profiled(self):
# Tests that we can successfully invoke the profiler on a remote node,
# and collect the remote events back in the local profiler.
if self.rank != 1:
return
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
for dst in dst_ranks:
dst_worker = worker_name(dst)
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
fut.wait()
events = prof.function_events
rpc_event = get_function_event(events, RPCExecMode.ASYNC.value)
self.check_profiling_info(
worker_name(self.rank),
dst_worker,
udf_with_torch_ops,
rpc_event,
RPCExecMode.ASYNC,
)
remote_events = {event.name: event for event in events if event.is_remote}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
worker_name(dst),
)
for expected_remote_event_name in EXPECTED_REMOTE_EVENTS:
expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name
self.assertTrue(expected_key in remote_events)
remote_event = remote_events[expected_key]
# Remote event should have a node ID corresponding to the worker
# it ran on.
self.assertEqual(remote_event.node_id, dst)
            # Validate the order in which remote events show up in the profiling output.
def convert_remote_to_local(event_name):
remote_op_key = rpc_profiling_key + REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key)
+ len(remote_op_key) :
]
remote_events_list = [
convert_remote_to_local(event.name)
for event in events
if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS
]
self.assertEqual(
set(remote_events_list),
set(EXPECTED_REMOTE_EVENTS),
f"Mismatch between profiled events: {set(remote_events_list)} and expected events: {set(EXPECTED_REMOTE_EVENTS)}",
)
@dist_init
def test_profiler_remote_events_profiled(self):
self._run_test_profiler_remote_events_profiled()
@dist_init
def test_profiler_remote_events_profiled_single_threaded(self):
self._run_test_profiler_remote_events_profiled()
def run_profiling_workload(self, dst):
fut = rpc.rpc_async(
worker_name(dst),
torch.mul,
args=(
torch.tensor(1.0, requires_grad=True),
torch.tensor(1.0, requires_grad=True),
),
)
fut.wait()
def _run_rpc_profiling_async_function(self, device="cpu"):
if self.rank != 1:
return
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
with _profile() as prof:
ret = rpc.rpc_async(
dst1, slow_async_add, args=(dst2, x, y, device), timeout=20
)
ret.wait()
function_events = prof.function_events
# slow_async_add resulted in an RPC from dst1 -> dst2, so this should be
# recorded.
key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1
)
nested_rpc_key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2
)
expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix
remote_events = [event for event in function_events if event.is_remote]
rpc_remote_event = [
event for event in remote_events if event.name == expected_key
]
self.assertEqual(1, len(rpc_remote_event))
rpc_remote_event = rpc_remote_event[0]
self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size)
# slow_async_add's RPC does an add on dst2, which should be reflected as well.
remote_add_key = (
expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add)
)
remote_add_event = [
event for event in remote_events if event.name == remote_add_key
]
self.assertEqual(1, len(remote_add_event))
remote_add_event = remote_add_event[0]
# Validate that node_id is dst2.
self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size)
@dist_init
def test_rpc_profiling_async_function(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_async_function_single_threaded(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_remote_record_function(self):
# test that functions run over RPC with record_function show the expected
# profiled block.
if self.rank != 1:
return
dst_ranks = [i for i in range(self.world_size) if i != self.rank]
for dst_rank in dst_ranks:
dst_worker = worker_name(dst_rank)
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True))
fut.wait()
function_events = prof.function_events
record_function_remote_event = [
evt for evt in function_events if "##forward##" in evt.name
]
self.assertEqual(1, len(record_function_remote_event))
record_function_remote_event = record_function_remote_event[0]
self.assertEqual(record_function_remote_event.node_id, dst_rank)
# cpu_children only returns direct children, so here we get all
# children recursively.
def get_cpu_children(event):
if not event.cpu_children:
return []
cpu_children = event.cpu_children
for e in event.cpu_children:
cpu_children.extend(get_cpu_children(e))
return cpu_children
remote_children = get_cpu_children(record_function_remote_event)
# Get local children and verify parity.
with _profile() as prof:
udf_with_torch_ops(-1, True)
local_function_events = prof.function_events
local_record_function_event = next(
evt for evt in local_function_events if "##forward##" in evt.name
)
local_children = get_cpu_children(local_record_function_event)
local_children_names = [
evt.name for evt in local_children
]
REMOTE_OP_STR = "#remote_op: "
def convert_remote_to_local(event_name):
remote_op_key = REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key) + len(remote_op_key) :
]
for evt in remote_children:
local_name = convert_remote_to_local(evt.name)
self.assertTrue(local_name in local_children_names)
def validate_profiling_workload(self, dst, prof):
def convert_remote_to_local(event_name):
return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :]
events = prof.function_events
remote_events = {
convert_remote_to_local(event.name): event
for event in events
if event.is_remote
}
self.assertTrue("aten::mul" in remote_events)
remote_mul_event = remote_events["aten::mul"]
self.assertEqual(remote_mul_event.node_id, dst)
self.check_profiling_info(
worker_name(self.rank),
worker_name(dst),
torch.mul,
remote_mul_event,
RPCExecMode.ASYNC,
)
def _run_test_profiler_with_autograd_context(self):
dst = (self.rank + 1) % self.world_size
if self.rank == 1:
# Cases where we can double wrap messages with profiling information and autograd info.
with dist_autograd.context():
with _profile() as prof:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
            # Ensure that the flipped order of the ctx managers results in events
            # being recorded as expected.
with _profile() as prof:
with dist_autograd.context():
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
@dist_init
def test_profiler_with_autograd_context_single_threaded(self):
self._run_test_profiler_with_autograd_context()
@dist_init
def test_profiler_with_autograd_context(self):
self._run_test_profiler_with_autograd_context()
def _profiler_test_with_rpc(
self, rpc_exec_mode, func, args, use_record_function=False, dst=None, kineto_profile=False
):
dst = dst if dst is not None else (self.rank + 1) % self.world_size
# only run profiler on rank 1.
p = _profile if not kineto_profile else torch.profiler.profile # kineto
if self.rank == 1:
with p() as prof:
record_function_ctx_mgr = (
contextlib.nullcontext()
if not use_record_function
else torch.autograd.profiler.record_function(
"foo"
)
)
with record_function_ctx_mgr:
if rpc_exec_mode == RPCExecMode.SYNC:
rpc.rpc_sync(worker_name(dst), func, args=args)
elif rpc_exec_mode == RPCExecMode.ASYNC:
fut = rpc.rpc_async(worker_name(dst), func, args=args)
if kineto_profile:
# Ensure multiple async RPCs don't cause issues.
# Would have raised
# "RuntimeError: Cannot call
# RemoteProfilerManager::setCurrentKey when current
# key is already set." error if RPC profiling was
# not disabled properly for kineto.
fut2 = rpc.rpc_async(worker_name(dst), func, args=args)
fut2.wait()
fut.wait()
else:
self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE)
rref = rpc.remote(worker_name(dst), func, args=args)
rref.to_here()
# To avoid flakiness, wait for the RRef to be profiled. This
# means that we received the acknowledgement of successful
# creation on the owner and ran the callbacks responsible
# for recording the profiling event.
rref._get_profiling_future().wait()
events = prof.function_events if not kineto_profile else prof.events()
if kineto_profile:
# RPC profiling is disabled so there should be no rpc related
# events.
with self.assertRaises(IndexError):
get_function_event(events, rpc_exec_mode.value)
return
rpc_event = get_function_event(events, rpc_exec_mode.value)
# verify Node ID for this rpc event.
self.assertEqual(rpc_event.node_id, self.rank)
# Ensure recording of remote events.
remote_events = {event for event in events if event.node_id == dst} - {rpc_event}
self.assertGreaterEqual(len(remote_events), 1)
for remote_event in remote_events:
self.assertEqual(remote_event.node_id, dst)
if use_record_function:
scope_event = get_function_event(events, "foo")
# Since RPC call is within the scope, its CPU interval should be
# contained within foo's interval.
self.assertLessEqual(scope_event.time_range.start, rpc_event.time_range.start)
self.assertGreaterEqual(scope_event.time_range.end, rpc_event.time_range.end)
# the sender, dest worker, function run, and type of RPC should all
# be recorded.
self_worker_name = worker_name(self.rank)
dst_worker_name = worker_name(dst)
self.check_profiling_info(self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode)
if use_record_function:
# verify order by ensuring that the outer context comes
# before the rpc event.
foo_event_ix = next(i for i, event in enumerate(events) if "foo" in event.name)
rpc_event_idx = next(i for i, event in enumerate(events) if rpc_exec_mode.value in event.name)
self.assertLess(foo_event_ix, rpc_event_idx)
def _run_test_profiler_with_sync_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_sync_rpc_udf(self):
self._run_test_profiler_with_sync_rpc_udf()
@dist_init
def test_profiler_with_sync_rpc_udf_single_threaded(self):
self._run_test_profiler_with_sync_rpc_udf()
def _run_test_profiler_with_sync_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_sync_rpc_builtin(self):
self._run_test_profiler_with_sync_rpc_builtin()
@dist_init
def test_profiler_with_sync_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_sync_rpc_builtin()
def _run_test_profiler_with_async_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,),
use_record_function=True)
        # Test to ensure that enabling the kineto profiler with RPC does not
        # turn on RPC profiling (which is unsupported) and does not cause issues.
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, my_sleep_func, args=(1,), kineto_profile=True
)
@dist_init
def test_profiler_with_async_rpc_udf(self):
self._run_test_profiler_with_async_rpc_udf()
@dist_init
def test_profiler_with_async_rpc_udf_single_threaded(self):
self._run_test_profiler_with_async_rpc_udf()
def _run_test_profiler_with_async_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_async_rpc_builtin(self):
self._run_test_profiler_with_async_rpc_builtin()
@dist_init
def test_profiler_with_async_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_async_rpc_builtin()
def _run_test_profiler_with_remote_udf(self):
self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank
)
@dist_init
def test_profiler_with_remote_udf(self):
self._run_test_profiler_with_remote_udf()
@dist_init
def test_profiler_with_remote_udf_single_threaded(self):
self._run_test_profiler_with_remote_udf()
def _run_test_profiler_with_remote_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
torch.mul,
args=(torch.ones(1), torch.ones(1)),
dst=self.rank,
)
@dist_init
def test_profiler_with_remote_builtin(self):
self._run_test_profiler_with_remote_builtin()
@dist_init
def test_profiler_with_remote_builtin_single_threaded(self):
self._run_test_profiler_with_remote_builtin()
def _run_test_profiler_with_script_async_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_async_rpc(self):
self._run_test_profiler_with_script_async_rpc()
@dist_init
def test_profiler_with_script_async_rpc_single_threaded(self):
self._run_test_profiler_with_script_async_rpc()
def _run_test_profiler_with_script_sync_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_sync_rpc(self):
self._run_test_profiler_with_script_sync_rpc()
@dist_init
def test_profiler_with_script_sync_rpc_single_threaded(self):
self._run_test_profiler_with_script_sync_rpc()
def _run_test_profiler_with_script_remote_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank
)
@dist_init
def test_profiler_with_script_remote_rpc(self):
self._run_test_profiler_with_script_remote_rpc()
@dist_init
def test_profiler_with_script_remote_rpc_single_threaded(self):
self._run_test_profiler_with_script_remote_rpc()
def _assert_top_level_events(self, process_global_events, expected_top_level_event_names):
top_level_event_names = []
for thread_local_events in process_global_events:
            # Get the top-level events from all events that happened on a thread.
last_end_time = 0
for event in thread_local_events:
event_name = event.name
time_range = event.time_range
if time_range.start > last_end_time:
top_level_event_names.append(event_name)
last_end_time = time_range.end
top_level_event_names = sorted(top_level_event_names)
expected_top_level_event_names = sorted(expected_top_level_event_names)
self.assertEqual(
top_level_event_names,
expected_top_level_event_names,
f"Expected events {expected_top_level_event_names}, but got {top_level_event_names}",
)
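    # Example of the selection rule above (hypothetical timings): for events
    # A[1-10], B[2-4] and C[12-15] on one thread, only A and C are considered
    # top level, because B starts before A has ended.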
@dist_init
def test_server_process_global_profiler(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
x = torch.tensor(1)
y = torch.tensor(2)
outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
outer_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.add, (x, y))
inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
inner_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.sub, (x, y))
inner_profile_rref.rpc_sync().__exit__(None, None, None)
outer_profile_rref.rpc_sync().__exit__(None, None, None)
inner_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (inner_profile_rref,))
expected_inner_events = ['aten::sub']
expected_outer_events = expected_inner_events + ['aten::add']
self._assert_top_level_events(inner_events, expected_inner_events)
outer_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (outer_profile_rref,))
self._assert_top_level_events(outer_events, expected_outer_events)
inner_profile_rref.rpc_sync().key_averages()
outer_profile_rref.rpc_sync().key_averages()
@dist_init
def test_async_record_function_double_end_callbacks(self):
num_sleep_seconds = 1
if self.rank == 1:
# Validate that calling the function twice results in an error.
with _profile():
with torch.autograd.profiler.record_function("foo") as rf:
fut = rpc.rpc_async(
worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
)
rf._call_end_callbacks_on_future(fut)
with self.assertRaisesRegex(
RuntimeError, "can only be called once."
):
rf._call_end_callbacks_on_future(fut)
fut.wait()
@dist_init
def test_async_record_function_legacy(self):
        # Test that the legacy _record_function ops work.
# Note: These exist for backward compatibility with TorchScript
num_sleep_seconds = 1
if self.rank == 1:
with _profile():
try:
handle = torch.ops.profiler._record_function_enter("foo", None)
fut = rpc.rpc_async(
worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
)
torch.ops.profiler._call_end_callbacks_on_jit_fut(handle, fut)
finally:
torch.ops.profiler._record_function_exit(handle)
fut.wait()
@dist_init
def test_async_record_function_cbs_jit_call(self):
if self.rank == 1:
with _profile() as pf:
key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
torch._jit_internal._qualified_name(my_script_func),
"worker1",
"worker0",
)
with torch.autograd.profiler.record_function(key) as rf:
fut = rpc.rpc_async(
worker_name(0), my_script_func, args=(torch.tensor(1),)
)
# Intentionally calling record_function internals
fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(rf.record, fut)
result = fut.wait()
# Validate that the profiling future returns the same value as the RPC
# future.
expected = torch.add(torch.tensor(1), torch.tensor(1))
self.assertEqual(result, expected)
events = pf.function_events
rpc_event = get_function_event(
events, torch._jit_internal._qualified_name(my_script_func)
)
self.assertTrue(torch._jit_internal._qualified_name(my_script_func) in rpc_event.name)
@dist_init
def test_py_class_constructor(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,))
self.assertEqual(ret.a, n)
@dist_init
def test_py_class_instance_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,)
)
self.assertEqual(ret, MyClass(2).my_instance_method(n))
@dist_init
def test_py_class_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1)
)
self.assertEqual(ret, MyClass.my_class_method(n, n + 1))
@dist_init
def test_py_class_static_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,)
)
self.assertEqual(ret, MyClass.my_static_method(n + 10))
@dist_init
def test_py_multi_async_call(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker_info = rpc.get_worker_info(worker_name(dst_rank))
fut1 = rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,))
fut2 = rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2))
self.assertEqual(fut1.wait(), MyClass.my_static_method(n + 10))
self.assertEqual(fut2.wait(), min(n, n + 1, n + 2))
@dist_init
def test_py_no_return_result(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), no_result)
self.assertEqual(ret, no_result())
@dist_init
def test_py_tensors(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n)))
@dist_init
def test_py_tensors_multi_async_call(self):
futs = []
n = self.rank + 1
dst_rank = n % self.world_size
for i in range(100):
fut = rpc.rpc_async(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(i, i), torch.ones(i, i)),
)
futs.append(fut)
for j, val in enumerate(torch.futures.wait_all(futs)):
self.assertEqual(
val, my_tensor_function(torch.ones(j, j), torch.ones(j, j))
)
@dist_init
def test_py_tensors_in_container(self):
n = self.rank + 1
dst_rank = n % self.world_size
a = [torch.ones(n, n), torch.ones(n, n)]
b = TensorClass(build_complex_tensors())
c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)}
ret = rpc.rpc_sync(
worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c)
)
self.assertEqual(ret, my_complex_tensor_function(a, b, c))
@dist_init
def test_py_nested_pickle(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
run_nested_pickle,
args=(MyPickleClass(), torch.ones(2, 2)),
)
m = MyPickleClass()
m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2)))
self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2)))
@dist_init
def test_py_function_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
with self.assertRaises(TypeError):
rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,))
@dist_init
def test_py_raise_in_user_func(self):
with captured_output() as (_, err):
# This barrier prevents a race condition where the main thread has
# not entered the context manager when the remote function runs.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func)
with self.assertRaisesRegex(ValueError, expected_err):
fut.wait()
            # This barrier prevents a race condition where the main thread exits
            # the context manager before the remote function has run.
dist.barrier()
# Validate that trainers log errors when running functions.
stderr_lines = err.getvalue()
self.assertTrue(expected_err in stderr_lines)
@dist_init
def test_py_raise_in_user_func_escaped_str(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape)
try:
fut.wait()
except ValueError as e:
msg = str(e)
# Ensure newlines are unescaped to provide a better repr of error.
self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape"))
else:
self.assertTrue(False, "expected raise_func_escape to raise ValueError.")
@dist_init
def test_nested_rpc(self):
self._nested_rpc(nested_rpc, torch.ones(2, 2) + 1)
@dist_init
def test_stress_light_rpc(self):
self._stress_test_rpc(light_rpc)
@dist_init
def test_stress_heavy_rpc(self):
self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_stress_heavy_rpc_torchscript(self):
self._stress_test_rpc(heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_builtin_remote_ret(self):
self._builtin_remote_ret(
torch.ones(2, 2),
torch.ones(2, 2),
torch.ones(2, 2) * 2
)
@dist_init
def test_builtin_remote_self(self):
self._builtin_remote_self(
torch.ones(2, 2),
torch.ones(2, 2),
torch.ones(2, 2) * 2
)
@staticmethod
def _multi_args_fn(n, sparse=False):
if sparse:
return (build_sparse_tensor(), build_sparse_tensor())
else:
return (torch.ones(n, n), torch.ones(n, n))
@dist_init
def test_multi_builtin_remote_ret(self):
self._test_multi_remote_call(
torch.add, False,
args_fn=RpcTest._multi_args_fn
)
@dist_init
def test_py_udf_remote(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2))
@staticmethod
def _multi_kwargs_fn(n, sparse=False):
if sparse:
return {
"a": build_sparse_tensor(),
"b": build_sparse_tensor(),
"c": build_sparse_tensor()
}
else:
return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)}
@dist_init
def test_multi_py_udf_remote(self):
self._test_multi_remote_call(
my_function,
False,
kwargs_fn=RpcTest._multi_kwargs_fn
)
@dist_init
def test_py_rref_args(self):
self._py_rref_args(
torch.ones(2, 2),
1,
torch.ones(2, 2),
2,
torch.ones(2, 2) * 2 + 3)
@dist_init
def test_py_rref_args_user_share(self):
self._py_rref_args_user_share(
torch.ones(2, 2),
1,
2,
torch.ones(2, 2),
3,
4,
torch.ones(2, 2) * 2 + 10
)
@dist_init
def test_py_rpc_rref_args(self):
self._py_rpc_rref_args(
torch.ones(2, 2),
1,
2,
torch.ones(2, 2),
3,
4,
torch.ones(2, 2) * 2 + 10
)
@dist_init
def test_nested_remote(self):
self._nested_remote(
nested_remote,
torch.ones(2, 2) + 3
)
@dist_init
def test_nested_rref(self):
self._nested_rref(
nested_rref,
torch.ones(2, 2) + 1,
torch.ones(2, 2) + 2
)
@dist_init
def test_nested_rref_stress(self):
self._nested_rref_stress(
nested_rref,
torch.ones(2, 2) + 1,
torch.ones(2, 2) + 2
)
@dist_init
def test_multi_layer_nested_async_rpc(self):
        # This test returns right away, but it leaves behind a chain of async
        # RPCs. The termination algorithm should detect those messages properly.
        # Otherwise, some peer could exit early, leaving others to hit timeout
        # or connection-closed errors.
ttl = 20
n = self.rank + 1
dst_rank = n % self.world_size
multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl)
@dist_init
def test_remote_with_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
# check ref to other workers
rref = rpc.remote(worker_name(dst_rank), raise_func)
with self.assertRaises(ValueError):
rref.to_here()
# check ref to itself
rref = rpc.remote(worker_name(self.rank), no_result, args=(10,))
with self.assertRaises(TypeError):
rref.to_here()
@dist_init
def test_rpc_return_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.rpc_sync(
worker_name(dst_rank1),
rpc_return_rref,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)
@dist_init
def test_rref_forward_chain(self):
ttl = 8
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl)
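        # Each hop in the forward chain returns a single-element list of RRefs,
        # so unwrap it ttl times to reach the final value.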
for _ in range(ttl):
self.assertEqual(len(ret_rref), 1)
ret_rref = ret_rref[0].to_here()
ret = ret_rref
self.assertEqual(ret, torch.add(torch.ones(n, n), 1))
@dist_init
def test_local_rref_no_fork(self):
local_rref = RRef(35)
self.assertEqual(local_rref.local_value(), 35)
@dist_init
def test_local_value_not_on_owner(self):
        # Ensure that an error is raised if a user tries to call
        # local_value() on a non-owning node.
next_rank = (self.rank + 1) % self.world_size
rref = rpc.remote(
worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
with self.assertRaisesRegex(
RuntimeError, (
fr"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), "
fr"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), "
r"can't call localValue\(\) on user "
fr"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). "
fr"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)"
)
):
rref.local_value()
@dist_init
def test_return_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_list = rpc.rpc_sync(
worker_name(dst_rank), get_rref_list, args=([1, 2, 3],)
)
for rref in rref_list:
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, 10),
)
rets = [
rpc.rpc_sync(
rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref)
)
for rref in rref_list
]
self.assertEqual(rets, [11, 12, 13])
@dist_init
def _test_rref_type(self, blocking):
def launched_rpc(events):
expected_name = f"rpc_{RPCExecMode.ASYNC.value}#_rref_typeof_on_owner"
return any(e.name.startswith(expected_name) for e in events)
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1))
with _profile() as p:
t = rref._get_type(blocking=blocking)
if not blocking:
t = t.wait()
self.assertTrue(launched_rpc(p.function_events))
expected_type = type(torch.ones(2))
self.assertEqual(t, expected_type)
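        # Subsequent _get_type calls should be served from the RRef's cached
        # type and must not launch any additional RPCs (checked with the
        # profiler below).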
futs = []
def verify(fut):
self.assertEqual(fut.value(), expected_type)
with _profile() as p:
for _ in range(10):
t = rref._get_type(blocking=blocking)
if not blocking:
futs.append(t)
t.add_done_callback(verify)
t = t.wait()
self.assertEqual(t, expected_type)
if not blocking:
# Note that cached calls with blocking=False all return the same
# cached original future.
first_fut = futs[0]
for f in futs[1:]:
self.assertTrue(f is first_fut)
# Ensure we never launch another RPC, other than for the very
# first call.
self.assertFalse(launched_rpc(p.function_events))
self.assertEqual(t, type(torch.ones(2)))
rref = rpc.remote(dst, MyClass, args=(0,))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
def test_rref_type_blocking(self):
self._test_rref_type(blocking=True)
def test_rref_type_non_blocking(self):
self._test_rref_type(blocking=False)
@dist_init
def _test_rref_type_with_error(self, blocking):
dst = worker_name((self.rank + 1) % self.world_size)
        # The remote creation raises on the owner, so fetching the RRef type
        # should surface that error.
rref = rpc.remote(dst, raise_func)
# Blocking: error raised inline
if blocking:
with self.assertRaisesRegex(ValueError, "Expected error"):
rref._get_type(blocking=blocking)
else:
# Non-blocking: Immediately return future, block on wait
fut = rref._get_type(blocking=blocking)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
def test_rref_type_with_error_blocking(self):
self._test_rref_type_with_error(blocking=True)
def test_rref_type_with_error_non_blocking(self):
self._test_rref_type_with_error(blocking=False)
@dist_init
def _test_rref_type_owner(self, blocking):
rref = RRef(torch.ones(2) + 1)
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, type(torch.ones(2)))
rref = RRef(MyClass(0))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
def test_rref_type_owner_blocking(self):
self._test_rref_type_owner(blocking=True)
def test_rref_type_owner_non_blocking(self):
self._test_rref_type_owner(blocking=False)
@staticmethod
def _slow_add(x, y):
time.sleep(1)
return x + y
@dist_init
def test_rref_type_slow_init(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1))
self.assertEqual(rref._get_type(), type(torch.ones(2)))
@dist_init
def test_owner_equality(self):
a = RRef(40)
b = RRef(50)
other_rank = (self.rank + 1) % self.world_size
other_a = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_b = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_a.to_here() # to ensure clean termination
other_b.to_here()
self.assertNotEqual(a.owner(), 23)
self.assertEqual(other_a.owner(), other_b.owner())
self.assertNotEqual(a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_b.owner())
self.assertEqual(a.owner(), a.owner())
self.assertEqual(a.owner(), b.owner())
self.assertEqual(a.owner(), rpc.get_worker_info())
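        # WorkerInfo objects are usable as dict keys; RRefs owned by the same
        # worker map to the same key, so the dict below ends up with two entries.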
x = {}
x[a.owner()] = a
x[other_a.owner()] = other_a
self.assertEqual(x[a.owner()], a)
self.assertEqual(x[b.owner()], a)
self.assertEqual(x[other_a.owner()], other_a)
self.assertEqual(x[other_b.owner()], other_a)
self.assertEqual(len(x), 2)
@dist_init
def test_pass_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker = worker_name(dst_rank)
rref = RRef(40)
self.assertEqual(
rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90
)
self.assertEqual(
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90
)
self.assertEqual(
rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90
)
@dist_init
def test_remote_same_worker(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init(setup_rpc=True)
def test_call_method_on_rref(self):
"""
Tests that it is possible to call an instance method on a remote object
by using rref.owner() as destination of the call.
"""
vals = [10, 2, 5, 7]
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# creates a remote object
rref = rpc.remote(dst_worker, MyClass, args=(vals[0],))
# modifies state of the remote object
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[1]),
)
rpc.rpc_async(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[2]),
).wait()
rpc.remote(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[3]),
).to_here()
# queries state of the remote object
result = rpc.rpc_sync(
dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref)
)
self.assertEqual(result, sum(vals))
# Notice `rpc.api.shutdown()` accesses
# `_delete_all_user_and_unforked_owner_rrefs` through
# `torch.distributed.rpc.api`, so patching
# `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will
# not help.
@mock.patch.object(torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs")
def _test_rref_leak(self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
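        # The UserRRef below is intentionally left alive; with the deletion
        # helper mocked out above, shutdown should report it as a leak unless
        # the leak is explicitly ignored.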
rref = rpc.remote( # noqa: F841
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.ones(2, 2), 1),
)
import torch.distributed.rpc.api as api
if ignore_leak:
api._ignore_rref_leak = True
rpc.shutdown(graceful=True)
else:
api._ignore_rref_leak = False
with self.assertRaisesRegex(RuntimeError, "Leaking RRef"):
rpc.shutdown(graceful=True)
@dist_init(setup_rpc=False)
def test_rref_leak(self):
self._test_rref_leak(ignore_leak=False)
@dist_init(setup_rpc=False)
def test_ignore_rref_leak(self):
self._test_rref_leak(ignore_leak=True)
@dist_init
def test_rref_str(self):
rref1 = RRef(self.rank)
id_class = "GloballyUniqueId"
self.assertEqual(
f"OwnerRRef({id_class}(created_on={self.rank}, local_id=0))", rref1.__str__()
)
dst_rank = (self.rank + 1) % self.world_size
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
self.assertEqual(
rref2.__str__(),
f"UserRRef(RRefId = {id_class}(created_on={self.rank}, local_id=1), "
f"ForkId = {id_class}(created_on={self.rank}, local_id=2))",
)
@dist_init
def test_rref_get_future(self):
        # Tests that we can obtain the future corresponding to the creation of
        # the RRef on the remote end.
if self.rank == 0:
# Builtin
rref = rpc.remote(worker_name(1), torch.add, args=(1, 1))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# UDF
rref = rpc.remote(worker_name(1), foo_add, args=())
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# Script
rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1), ))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
@dist_init
def test_rref_context_debug_info(self):
        # This test checks local states that are modified by remote workers,
        # which means we need a barrier before and after every check.
        # The barrier before a check makes sure that all previous states have
        # been cleared globally; the barrier after ensures that no subsequent
        # state change leaks into the current check.
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Check 1: local RRef does not update owners_ map or add a pending user.
#################################################
rref1 = RRef(self.rank)
# don't need a barrier here as local RRef is handled by this thread
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertIn("num_pending_users", info)
        # An RRef on a local value is not added to the context until it is
        # shared over RPC.
self.assertEqual(0, int(info["num_owner_rrefs"]))
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after the check 1
dist.barrier()
# Check 2: Sharing RRef as an arg should update owners_ map
###########################################################
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,))
# barrier before check 2
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(1, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 2
dist.barrier()
# clear states for check 2
rpc.rpc_sync(worker_name(dst_rank), clear_global_rref)
# Wait for owner rref to be cleared.
while int(info["num_owner_rrefs"]) != 0:
info = _rref_context_get_debug_info()
time.sleep(0.1)
dist.barrier()
# Check 3: rpc.remote call should update owners_ map
####################################################
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref3 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref2.to_here()
rref3.to_here()
# barrier before check 3
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(2, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 3
dist.barrier()
@dist_init
def test_disable_gil_profiling(self):
        # Test that GIL wait time is not recorded while GIL profiling is
        # disabled, which is the default; enabling it below should add the
        # metric to the agent debug info.
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"])
rpc.enable_gil_profiling(True)
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.gil_average_wait_time_us", info)
@dist_init(setup_rpc=False)
def test_local_shutdown(self):
        # Test that we can start RPC and then immediately shut down locally
        # without sending any messages.
rpc.init_rpc(
name=f"worker{self.rank:d}",
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init
def test_debug_info(self):
        # Only test keys in this test case; values should be covered by the
        # individual modules' debug-info tests.
import torch.distributed.autograd as dist_autograd
info = _get_debug_info()
rref_info = _rref_context_get_debug_info()
agent_info = rpc.api._get_current_rpc_agent().get_debug_info()
autograd_info = dist_autograd._get_debug_info()
common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys()
self.assertEqual(0, len(common_keys))
expected = {}
expected.update(rref_info)
expected.update(agent_info)
expected.update(autograd_info)
# NB: Key ordering is only preserved in python 3.6+. So here, we
# manually check keys are equal.
for key in expected.keys():
self.assertIn(key, info.keys())
for key in info.keys():
self.assertIn(key, expected.keys())
@dist_init(setup_rpc=False)
@skip_but_pass_in_sandcastle_if(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_handle_send_exceptions(self):
# test that if a callee node has gone down, we raise an appropriate
# exception instead of just crashing.
rpc.init_rpc(
name=f"worker{self.rank:d}",
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc._set_rpc_timeout(10)
# This barrier is needed to ensure that some workers do not exit before
# others have been brought up.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
if self.rank == 1:
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# allow destination worker to exit without joining
error_str = self.get_shutdown_error_regex()
wait_until_node_failure(dst_rank, error_str)
fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3))
            # The shutdown sequence is not very well defined, so we may see any
            # of the error messages defined in get_shutdown_error_regex.
with self.assertRaisesRegex(RuntimeError, error_str):
fut.wait()
# exit all workers non-gracefully.
rpc.shutdown(graceful=False)
@dist_init
def test_deadlock(self):
# this test is copied from https://github.com/pytorch/pytorch/issues/45089
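        # Rank 1 first issues a slow RPC with a timeout while the other ranks
        # proceed directly to process group initialization below.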
if self.rank == 1:
dst1 = worker_name((self.rank + 1) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait()
dist_initialized = dist.is_initialized()
if not dist_initialized:
dist.init_process_group(
backend="gloo",
init_method=self.file_init_method,
rank=self.rank,
world_size=self.world_size,
)
@dist_init(setup_rpc=False)
def test_local_shutdown_with_rpc(self):
# test that we can start RPC, send RPCs, and then run local shutdown.
rpc.init_rpc(
name=f"worker{self.rank:d}",
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
        # A barrier is needed to ensure that all RPCs are processed.
        # Otherwise, some RPCs can time out since the receiving end
        # has terminated.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init(setup_rpc=False)
def test_set_and_get_default_rpc_timeout(self):
timeout = 0.5
# A new `RpcBackendOptions` is constructed
# when accessing `self.rpc_backend_options`.
rpc_backend_options = self.rpc_backend_options
rpc_backend_options.rpc_timeout = timeout
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
set_timeout = rpc.get_rpc_timeout()
self.assertEqual(timeout, set_timeout)
rpc.shutdown()
@dist_init
def test_default_timeout_used(self):
"""
Tests that if no timeout is passed into rpc_async and rpc_sync, then the
default timeout is used.
"""
dst_rank = (self.rank + 1) % self.world_size
rpc._set_rpc_timeout(0.001) # 1 ms
        # Futures should time out and be marked with a timeout exception.
futs = [
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=())
for _ in range(10)
]
expected_error = self.get_timeout_error_regex()
for fut in futs:
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# ensure that if a new timeout is set old futures don't time out but new ones do.
rpc._set_rpc_timeout(200) # 200 seconds
# create a longstanding RPC.
fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
# now, set a short timeout.
rpc._set_rpc_timeout(0.001)
# fut2 should time out, fut1 should not.
fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut2.wait()
fut1.wait()
# Zero timeout means infinity, so future should run to completion.
rpc._set_rpc_timeout(0)
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait()
# reset to default timeout so shutdown messages can process cleanly.
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init
def test_rpc_timeouts(self):
# TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803)
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
timeout = 0.1 # 100 ms
expected_error = self.get_timeout_error_regex()
# Test async UDF
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
        # Ensure the RPC runs to completion when no per-call timeout is passed
        # and the default RPC timeout is used.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait()
# Test sync UDF
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
        # Ensure the RPC runs to completion when no per-call timeout is passed
        # and the default RPC timeout is used.
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# If we set a default timeout for RPCs, it should be respected, though
# still overridden if we pass in a different timeout to the APIs.
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# The RPCs should run to completion since we override the timeout.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5)
# Passing in a zero timeout should ensure that the RPC won't time out.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0)
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
def test_dist_init_decorator(self):
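        # Verify the decorator works both when called with arguments
        # (setup_rpc=False) and when applied bare.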
@dist_init(setup_rpc=False)
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
@dist_init
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
def test_use_rpc_pickler(self):
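        # Inside the context manager the custom pickler should be installed as
        # the default; the internal pickler should be restored on exit.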
class TestPickler:
pass
test_pickler = TestPickler()
with _use_rpc_pickler(test_pickler):
self.assertTrue(torch.distributed.rpc.api._default_pickler is test_pickler)
self.assertTrue(
torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler
)
@dist_init
def test_wait_all(self):
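        # _wait_all tracks futures created inside the block via
        # _thread_local_var.future_list and waits on all of them when the
        # context exits, so fut is already done afterwards.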
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
self.assertTrue(len(_thread_local_var.future_list) == 1)
self.assertTrue(isinstance(_thread_local_var.future_list[0], torch._C.Future))
self.assertTrue(fut.done())
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_multiple_call(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
for i in range(20):
fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1))
res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1))
self.assertEqual(res, torch.ones(i, i) + 1)
self.assertEqual(fut.wait(), torch.ones(i, i) + 1)
self.assertTrue(len(_thread_local_var.future_list) == 20)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_timeout(self):
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
timeout = 0.1 # 100 ms
rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_user_func(self):
with self.assertRaises(ValueError):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
rpc.rpc_async(dst, raise_func)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_body(self):
with self.assertRaises(ValueError):
with _wait_all():
raise_func()
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_custom_exception_throw_during_reconstruction(self):
"""
        Test that we still surface information about the remote-side exception
        even when we cannot recreate it on the client side.
"""
initialize_pg(self.file_init_method, self.rank, self.world_size)
if self.rank != 0:
exc_caught = False
dst = worker_name(0)
try:
rpc.rpc_sync(dst, custom_raise_func, args=())
except RuntimeError as e:
exc_caught = True
msg = str(e)
print(f"Got msg {msg}")
self.assertTrue("Original exception on remote side was" in msg)
self.assertTrue("CustomException" in msg)
except BaseException as e:
raise RuntimeError(
f"Failure - expected RuntimeError, got {e}"
) from e
finally:
self.assertTrue(exc_caught)
dist.barrier()
timed_out_rpc_event = None
@staticmethod
def timed_out_rpc():
RpcTest.timed_out_rpc_event.wait()
@dist_init
def test_wait_all_exit_early_python(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func)
fut3 = rpc.rpc_async(dst, raise_func)
# We should receive the error from fut2
with self.assertRaisesRegex(ValueError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_builtin(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
fut3 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, "size of tensor"):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_script_function(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
fut3 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_function_not_on_callee(self):
        # Test that if a function does not exist on a callee, we don't crash;
        # instead, the RPC fails with an error from the RPC pickler indicating
        # that the function cannot be resolved on the callee.
this_module = sys.modules[__name__]
caller_worker = "worker0"
callee_worker = "worker1"
if self.rank == 1:
            # Use delattr to remove the binding of the func on this node.
delattr(this_module, "foo_add")
# notify remote end that we have removed it.
rpc.rpc_sync(caller_worker, set_value, args=(self.rank,))
if self.rank == 0:
# func exists on caller, but not callee.
# wait for remote end to remove the binding of foo_add func.
wait_for_value_future()
            # Ensure that we have the attribute on this module. Otherwise, the
            # test could fail due to a caller-side pickling error.
self.assertTrue(hasattr(this_module, "foo_add"))
with self.assertRaisesRegex(
RuntimeError, "RPC pickler does not serialize"
):
rpc.rpc_sync(callee_worker, foo_add, args=())
@dist_init
def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
a = MyClass(1)
b = MyClass(2)
# This is to make Python not garbage collect a and b.
a.other = b
b.other = a
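        # The UserRRef created below is held by `a`, which stays alive through
        # the a <-> b reference cycle instead of being released when this test
        # returns.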
n = self.rank
a.rref = rpc.remote(
dst_worker_name,
torch.add,
args=(torch.ones(n, n), 2)
)
@dist_init(setup_rpc=False)
def test_use_rref_after_shutdown(self):
rpc.init_rpc(
name=f"worker{self.rank:d}",
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# pass in graceful=True to ensure that local UserRRefs are deleted.
rpc.shutdown(graceful=True)
with self.assertRaisesRegex(
RuntimeError, "Cannot call to_here\\(\\) on it after deletion."
):
rref.to_here()
with self.assertRaisesRegex(
RuntimeError, "Cannot call fork an UserRRef after deletion."
):
import torch.distributed.rpc.internal as internal
internal.serialize(rref)
@staticmethod
def _return_gpu_tensor():
return torch.rand(3, 3).cuda(0)
@staticmethod
def _return_gpu_tensor_list():
return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)]
@staticmethod
def _gpu_tensor_list_arg(tensor_list):
return torch.rand(3, 3)
def _create_rref(self):
owner_rank = (self.rank + 2) % self.world_size
return rpc.remote(
worker_name(owner_rank),
torch.add,
args=(torch.zeros(2, 2), 1)
)
@dist_init
def test_user_rrefs_confirmed(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret = rpc.rpc_sync(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret, True)
@dist_init
def test_user_rrefs_confirmed_remote(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret_rref = rpc.remote(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret_rref.to_here(), True)
@dist_init
def test_rref_py_pickle_not_supported(self):
local_rref = RRef(35)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, "Can not pickle rref in python pickler"):
torch.save(local_rref, fname)
@dist_init
def test_remote_throw(self):
rref = rpc.remote(worker_name((self.rank + 1) % self.world_size),
raise_or_inc,
args=(torch.ones(2),))
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
rref.to_here()
@dist_init
def test_non_cont_tensors(self):
if self.rank == 0:
# Create a non-contiguous tensor.
t = torch.rand(5, 5)
t_view = t.narrow(1, 2, 2)
self.assertFalse(t_view.is_contiguous())
t_cont = t_view.contiguous()
self.assertTrue(t_cont.is_contiguous())
self.assertEqual(t_view, t_cont)
# Send non-cont tensor over RPC.
next_rank = (self.rank + 1) % self.world_size
t_ret = rpc.rpc_sync(worker_name(next_rank), non_cont_test, args=(t_view, t_cont))
# Verify the returned tensor.
self.assertEqual(t_view, t_ret)
self.assertFalse(t_ret.is_contiguous())
@dist_init
def test_callback_simple(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
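        # The callback publishes its result through a concurrent.futures.Future
        # so the main thread can assert on the value it computed.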
def callback(fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
set_by_cb.set_result(ret.clone() + 1)
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_wrong_arg_num(self):
n = self.rank + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_fut = fut.then(my_function)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
with self.assertRaisesRegex(
RuntimeError,
"my\\_function\\(\\) missing 2 required positional arguments"
):
cb_fut.wait()
@dist_init
def test_callback_wrong_arg_type(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1))
fut1 = fut0.then(lambda x: x + 1)
with self.assertRaisesRegex(
RuntimeError,
"unsupported operand type\\(s\\) for \\+"
):
fut1.wait()
@dist_init
def test_callback_multi(self):
num_cbs = 10
n = self.rank + 1
def callback(idx, fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
return ret + idx
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_futs = [fut.then(partial(callback, idx)) for idx in range(num_cbs)]
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
for idx in range(num_cbs):
self.assertEqual(
cb_futs[idx].wait(),
torch.ones(n, n) * 2 + idx
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_chain(self):
n = self.rank + 1
def callback(fut):
return fut.wait() + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), 1)
)
num_cbs = 20
for _ in range(num_cbs):
fut = fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
@dist_init
def test_callback_in_rpc(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
add_use_future_cb,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_callback_with_ret(self):
dst = worker_name((self.rank + 1) % self.world_size)
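        # The callback issues a nested rpc_async and chains another `then`, so
        # the final value is the original RPC result plus two more increments
        # (hence + 3 overall below).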
def callback(fut0):
fut2 = rpc.rpc_async(
dst,
torch.add,
args=(fut0.wait(), 1)
).then(lambda fut1: fut1.wait() + 1)
return fut2.wait()
fut3 = rpc.rpc_async(
dst,
torch.add,
args=(torch.ones(2, 2), 1)
).then(callback)
self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3)
@dist_init
def test_callback_with_error(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
with self.assertRaisesRegex(ValueError, "Expected error"):
fut0.wait()
raise RuntimeError("Another expected error")
fut1 = rpc.rpc_async(dst, raise_func).then(callback)
with self.assertRaisesRegex(RuntimeError, "Another expected error"):
fut1.wait()
@dist_init
def test_callback_none(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
TypeError,
"incompatible function arguments."
):
rpc.rpc_async(dst, raise_func).then(None)
@dist_init
def test_add_done_callback(self):
set_by_cb = False
n = self.rank + 1
def callback(fut):
nonlocal set_by_cb
fut.wait()
set_by_cb = True
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.add_done_callback(callback)
fut_then = fut.then(lambda _: True)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
        # There is no guarantee that the add_done_callback fn executes before
        # the test finishes, so add a 'then' callback that runs afterwards and
        # wait on it to guarantee the first callback has completed.
fut_then.wait()
self.assertTrue(set_by_cb)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_mark_future_twice(self):
fut = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.zeros(2, 2), 1)
)
self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1)
with self.assertRaisesRegex(
RuntimeError,
"Future can only be marked completed once"
):
fut.set_result(1)
@dist_init
def test_pickle_future(self):
fut = torch.futures.Future()
errMsg = "Can not pickle torch.futures.Future"
dst = worker_name((self.rank + 1) % self.world_size)
with TemporaryFileName():
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_sync(dst, fail_on_fut, args=(fut,))
with TemporaryFileName():
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_async(dst, fail_on_fut, args=(fut,))
with TemporaryFileName():
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.remote(dst, fail_on_fut, args=(fut,))
@dist_init
def test_future_done(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2), 1))
fut.wait()
self.assertTrue(fut.done())
@dist_init
def test_future_done_exception(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
self.assertTrue(fut.done())
def _test_future_cb(self, func):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
func,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_future_in_rpc(self):
self._test_future_cb(add_use_future_set_result)
@dist_init
def test_future_nested_callback(self):
self._test_future_cb(add_use_future_nested_cb)
def _test_async_function_raise(self, mode):
with self.assertRaisesRegex(RuntimeError, "Expected error"):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_raise_func,
mode
)
@dist_init
def test_async_function_raise(self):
self._test_async_function_raise(RPCExecMode.SYNC)
@dist_init
def test_async_function_raise_async(self):
self._test_async_function_raise(RPCExecMode.ASYNC)
@dist_init
def test_async_function_raise_remote(self):
self._test_async_function_raise(RPCExecMode.REMOTE)
def _test_async_function_wrong_return_type(self, mode):
errMsg = (
"Functions decorated with @rpc\\.async_function must return a "
"torch\\.futures\\.Future object,"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_wrong_type,
mode
)
@dist_init
def test_async_function_wrong_return_type(self):
self._test_async_function_wrong_return_type(RPCExecMode.SYNC)
@dist_init
def test_async_function_wrong_return_type_async(self):
self._test_async_function_wrong_return_type(RPCExecMode.ASYNC)
@dist_init
def test_async_function_wrong_return_type_remote(self):
self._test_async_function_wrong_return_type(RPCExecMode.REMOTE)
@dist_init
def test_async_function_simple(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
def _test_async_function(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
args = (dst2, torch.ones(2, 2), 1, 2)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + 3)
@dist_init
def test_async_function_with_future_ctor(self):
self._test_async_function(async_add_with_future_ctor)
@dist_init
def test_async_function_with_future_ctor_remote(self):
self._test_async_function(
async_add_with_future_ctor,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_chained(self):
self._test_async_function(async_add_chained)
@dist_init
def test_async_function_chained_remote(self):
self._test_async_function(async_add_chained, RPCExecMode.REMOTE)
@dist_init
def test_async_function_nested(self):
self._test_async_function(async_add_nested)
@dist_init
def test_async_function_nested_remote(self):
self._test_async_function(async_add_nested, RPCExecMode.REMOTE)
@dist_init
def test_async_static_method(self):
self._test_async_function(AsyncExecutionClass.static_async_add)
@dist_init
def test_async_static_method_remote(self):
self._test_async_function(
AsyncExecutionClass.static_async_add,
RPCExecMode.REMOTE
)
@dist_init
def test_async_class_method(self):
self._test_async_function(AsyncExecutionClass.class_async_add)
@dist_init
def test_async_class_method_remote(self):
self._test_async_function(
AsyncExecutionClass.class_async_add,
RPCExecMode.REMOTE
)
def _test_test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
rref = rpc.remote(dst1, AsyncExecutionClass)
x = torch.ones(2, 2)
y = torch.ones(2, 2) + 1
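        # Each proxy flavor (rpc_sync/rpc_async/remote) calls the static, class,
        # and bound async-add methods once and sums the results; with x = ones
        # and y = ones + 1, the expected total is 3 * 4 * x.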
if mode == RPCExecMode.SYNC:
ret = rref.rpc_sync().static_async_add(dst2, x, x, y)
ret += rref.rpc_sync().class_async_add(dst2, x, x, y)
ret += rref.rpc_sync().bound_async_add(dst2, x, x, y)
elif mode == RPCExecMode.ASYNC:
ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait()
elif mode == RPCExecMode.REMOTE:
ret = rref.remote().static_async_add(dst2, x, x, y).to_here()
ret += rref.remote().class_async_add(dst2, x, x, y).to_here()
ret += rref.remote().bound_async_add(dst2, x, x, y).to_here()
self.assertEqual(ret, 3 * 4 * x)
@dist_init
def test_async_class_rref_proxy(self):
self._test_test_async_class_rref_proxy()
@dist_init
def test_async_class_rref_proxy_async(self):
self._test_test_async_class_rref_proxy(mode=RPCExecMode.ASYNC)
@dist_init
def test_async_class_rref_proxy_remote(self):
self._test_test_async_class_rref_proxy(mode=RPCExecMode.REMOTE)
def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
num = 20
step = 3
args = (dst2, torch.ones(2, 2), num, step)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + num * step)
@dist_init
def test_async_function_multi_chained(self):
self._test_async_function_multi(async_add_chained_multi)
@dist_init
def test_async_function_multi_chained_async(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_chained_remote(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_multi_fanout(self):
self._test_async_function_multi(async_add_multi_fanout)
@dist_init
def test_async_function_multi_fanout_async(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_fanout_remote(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.REMOTE
)
def _test_return_future(self, mode):
with self.assertRaisesRegex(
RuntimeError,
"Can not pickle torch.futures.Future"
):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
return_future,
mode
)
@dist_init
def test_return_future(self):
self._test_return_future(RPCExecMode.SYNC)
@dist_init
def test_return_future_async(self):
self._test_return_future(RPCExecMode.ASYNC)
@dist_init
def test_return_future_remote(self):
self._test_return_future(RPCExecMode.REMOTE)
@dist_init
def test_rref_timeout(self):
# This test is similar to ones in FaultyProcessGroupTest, but is meant to be
# run with other backends besides ProcessGroup.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = f"worker{dst_rank}"
# 10 ms timeout
rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
wait_until_owners_and_forks_on_rank(1, 1, rank=1)
@dist_init(setup_rpc=False)
@skip_but_pass_in_sandcastle_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_pg_then_rpc(self):
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init(setup_rpc=False)
@skip_but_pass_in_sandcastle_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_rpc_then_pg(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init
def test_wait_all_with_exception(self):
dst = worker_name((self.rank + 1) % self.world_size)
futs = [rpc.rpc_async(dst, raise_func) for _ in range(10)]
with self.assertRaisesRegex(ValueError, "Expected error"):
torch.futures.wait_all(futs)
@dist_init
def test_wait_all_with_partial_exception(self):
dst = worker_name((self.rank + 1) % self.world_size)
futs = [rpc.rpc_async(dst, torch.add, args=(torch.ones(2), 1)) for _ in range(10)]
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
torch.futures.wait_all(futs)
@dist_init(setup_rpc=False)
@skip_but_pass_in_sandcastle_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491",
)
def test_init_rpc_twice(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
# Wait for all init to complete.
dist.barrier()
# Use a different file name for the next initialization
new_backend_options = self.rpc_backend_options
new_backend_options.init_method += "init_2"
# Ensure rpc initialization works again.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=new_backend_options,
)
# Verify RPCs work after re-init.
dst = worker_name((self.rank + 1) % self.world_size)
rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
rpc.rpc_sync(dst, foo_add, args=())
rpc.shutdown()
def test_wrong_types(self):
with self.assertRaisesRegex(
TypeError,
"Argument backend must be a member of BackendType",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend="TENSORPIPE",
)
with self.assertRaisesRegex(
TypeError,
"Argument rpc_backend_options must be an instance of RpcBackendOptions",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=self.rpc_backend,
rpc_backend_options={"init_method": self.init_method}
)
def test_cannot_infer_backend_from_options(self):
# An exception should be raised if the backend isn't specified but
# options are given which are not an instance of any of the known
# agents' option classes.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(TypeError, "Could not infer backend for options"):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
@dist_init
def test_owner_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t1 = torch.rand(10, 10, requires_grad=True)
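        # Backward on a local OwnerRRef without a dist_autograd context should
        # accumulate gradients directly into t1.grad.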
rref = rpc.RRef(t1.sum() + t1.sum())
rref.backward()
expected_grad = torch.ones_like(t1) * 2
self.assertEqual(expected_grad, t1.grad)
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id)
self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1])
# Double backward.
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(expected_grad * 2, dist_autograd.get_gradients(context_id)[t1])
# Test errors.
with self.assertRaisesRegex(RuntimeError, "tensors does not require grad and does not have a grad_fn"):
rpc.RRef(torch.rand(10)).backward()
with self.assertRaisesRegex(RuntimeError, "grad can be implicitly created only for scalar outputs"):
rpc.RRef(torch.rand(10, requires_grad=True)).backward()
with self.assertRaisesRegex(RuntimeError, "Could not find autograd context with id: 100"):
rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100)
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rpc.RRef("foo").backward()
@staticmethod
def _sum(x):
return x.sum()
@staticmethod
def _identity(x):
return x
@dist_init
def test_user_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t = torch.rand(10, requires_grad=True)
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._sum, args=(t,))
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t])
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._identity, args=("foo",))
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rref.backward(context_id)
with self.assertRaisesRegex(RuntimeError, "User RRefs require 'dist_autograd_ctx_id' to be specified"):
rref.backward()
@dist_init(setup_rpc=False)
def test_shutdown_errors(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
if self.rank != 0:
og_func = rpc.api._broadcast_to_followers
og_rref_func = rpc.api._delete_all_user_and_unforked_owner_rrefs
# Monkey-patch _broadcast_to_followers to fail, which would ensure
# _all_gather on leader raises an exception.
def raise_error(sequence_id, objects_map):
og_func(sequence_id, objects_map)
raise RuntimeError('simulation')
# Monkey-patch _delete_all_user_and_unforked_owner_rrefs to fail,
# which would ensure barrier is not called on followers.
def rref_error():
raise RuntimeError('simulation rref')
try:
rpc.api._broadcast_to_followers = raise_error
rpc.api._delete_all_user_and_unforked_owner_rrefs = rref_error
with self.assertRaisesRegex(RuntimeError, 'simulation rref'):
rpc.shutdown()
finally:
rpc.api._broadcast_to_followers = og_func
rpc.api._delete_all_user_and_unforked_owner_rrefs = og_rref_func
else:
with self.assertRaisesRegex(RuntimeError, 'timed out in _all_gather'):
rpc.shutdown()
dist.barrier()
@dist_init
def test_my_parameter_server(self):
self._my_parameter_server(False)
class CudaRpcTest(RpcAgentTestFixture):
@skip_if_lt_x_gpu(2)
@dist_init
def test_profiler_remote_cuda(self):
if self.rank != 1:
return
dst_cuda_0 = (self.rank + 1) % self.world_size
dst_cuda_1 = (self.rank + 2) % self.world_size
dst_worker_cuda_0 = worker_name(dst_cuda_0)
dst_worker_cuda_1 = worker_name(dst_cuda_1)
with _profile(use_cuda=True) as p:
fut1 = rpc.rpc_async(dst_worker_cuda_0, udf_with_torch_ops, args=(0, ))
fut2 = rpc.rpc_async(dst_worker_cuda_1, udf_with_torch_ops, args=(1, ))
fut1.wait()
fut2.wait()
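        # Strip the remote-op prefix from profiler event names so they can be
        # compared against EXPECTED_REMOTE_EVENTS.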
def get_name(event):
return event.name[event.name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR):]
function_events = p.function_events
for event in function_events:
if event.is_async:
self.assertEqual(0, event.device_time_total)
self.assertEqual([], event.kernels)
self.assertEqual(0, event.device_time)
else:
if event.node_id == 1:
continue
self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1])
if get_name(event) in EXPECTED_REMOTE_EVENTS:
self.assertGreater(event.device_time_total, 0)
self.assertEqual(1, len(event.kernels))
kernel = event.kernels[0]
if event.node_id == dst_cuda_0:
self.assertEqual(kernel.device, 0)
if event.node_id == dst_cuda_1:
self.assertEqual(kernel.device, 1)
self.assertGreater(event.device_time, 0)
# Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled
# events.
remote_events = [event for event in function_events if event.is_remote]
remote_event_names = [get_name(event) for event in remote_events if get_name(event) in EXPECTED_REMOTE_EVENTS]
self.assertEqual(set(remote_event_names), set(EXPECTED_REMOTE_EVENTS))
class TensorPipeAgentRpcTest(RpcAgentTestFixture, RpcTestCommon):
def test_mismatched_type_for_options(self):
# An exception should be raised if the options are not an instance of
# TensorPipeRpcBackendOptions.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(
TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`"
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=rpc_backend_options,
)
def test_infer_backend_from_options(self):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.init_method,
_transports=tp_transports()
)
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent)
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_set_and_get_num_worker_threads(self):
NUM_THREADS = 27
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=NUM_THREADS,
_transports=tp_transports(),
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_set_default_timeout(self):
        # Set a high timeout since it doesn't affect test runtime and ensures
        # the test doesn't erroneously time out due to slow machines.
timeout = 100
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout,
_transports=tp_transports(),
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
default_timeout = rpc.get_rpc_timeout()
self.assertEqual(default_timeout, timeout)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_options_throw_on_timedelta_timeout(self):
from datetime import timedelta
timeout = timedelta()
# Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails
with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"):
rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout,
)
@dist_init
def _test_rref_get_type_timeout(self, blocking):
        # Test where we try to get the type of an RRef from its owner, but RRef
        # creation is slower than the timeout passed into _get_type.
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.5
expected_err = self.get_timeout_error_regex()
# Blocking: blocks on inline call
if blocking:
with self.assertRaisesRegex(RuntimeError, expected_err):
slow_rref._get_type(timeout=timeout, blocking=blocking)
# Non-blocking: blocks on wait
else:
fut = slow_rref._get_type(timeout=timeout, blocking=blocking)
with self.assertRaisesRegex(RuntimeError, expected_err):
fut.wait()
# FIXME We wait until the remote completed creating the OwnerRRef
# because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
def test_rref_get_type_timeout_blocking(self):
self._test_rref_get_type_timeout(blocking=True)
def test_rref_get_type_timeout_non_blocking(self):
self._test_rref_get_type_timeout(blocking=False)
@dist_init
def test_op_with_invalid_args(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Overloaded torch operator invoked from Python failed to match any schema"
):
rpc.rpc_sync(dst, torch.add, args=())
def _test_rref_proxy_timeout(self, rref_proxy_api):
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), ))
# Ensure RRef is created on remote node.
rref.to_here()
rref_api = getattr(rref, rref_proxy_api)
self.assertTrue(rref_api is not None, f"Failed to get RRef proxy api: {rref_proxy_api}")
expected_error = self.get_timeout_error_regex()
timeout = 2
with self.assertRaisesRegex(RuntimeError, expected_error):
result = rref_api(timeout=timeout).my_slow_method(torch.ones(2, 2))
if rref_api == rref.rpc_async:
result.wait()
elif rref_api == rref.remote:
result._get_future().wait()
# Case where rpc.remote() is stuck and exceeds timeout
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.01
rref_api = getattr(slow_rref, rref_proxy_api)
# Note that even when we call rref.rpc_async() in this case, we
# time out in future creation, not waiting for future. This is because
# rref proxy function calls rref._get_type before returning future,
# which blocks on the RRef being created on owner node, until the
# specified timeout.
with self.assertRaisesRegex(RuntimeError, expected_error):
result = rref_api(timeout=timeout).my_instance_method(torch.ones(2, 2))
            # rpc_async returns immediately and surfaces a timeout through wait()
if rref_api == slow_rref.rpc_async:
result.wait()
# FIXME We wait until the remote completed creating the OwnerRRef
# because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
@dist_init
def test_rref_proxy_timeout(self):
for rpc_api in ["rpc_sync", "rpc_async", "remote"]:
self._test_rref_proxy_timeout(rpc_api)
@dist_init
def test_send_to_rank_sparse(self):
dst_rank = (self.rank + 1) % self.world_size
# Test sparse tensor
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
x = build_sparse_tensor()
y = build_sparse_tensor()
expected_tensor = (x + y)
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y))
self.assertEqual(expected_tensor, ret)
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
x = build_sparse_tensor(coalesce=True)
y = build_sparse_tensor(coalesce=True)
expected_tensor = (x + y)
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y))
self.assertEqual(expected_tensor, ret)
@dist_init
def test_self_py_udf_remote_sparse(self):
self._self_py_udf_remote(
rpc.get_worker_info(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_self_remote_rref_as_rpc_arg_sparse(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_rpc_arg(
dst,
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_self_remote_rref_as_self_rpc_arg_sparse(self):
self._self_remote_rref_as_rpc_arg(
rpc.get_worker_info(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_self_remote_rref_as_remote_arg_sparse(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_remote_arg(
dst,
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_self_remote_rref_as_self_remote_arg_sparse(self):
self._self_remote_rref_as_remote_arg(
rpc.get_worker_info(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
def test_world_size_one_sparse(self):
self._world_size_one(
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_multi_rpc_sparse(self):
self._multi_rpc(True)
def test_wait_all_workers_sparse(self):
self._wait_all_workers(heavy_rpc_sparse, build_sparse_tensor())
def test_wait_all_workers_twice_sparse(self):
self._wait_all_workers_twice(heavy_rpc_sparse, build_sparse_tensor())
@dist_init
def test_py_sparse_tensors_in_container(self):
n = self.rank + 1
dst_rank = n % self.world_size
a = [build_sparse_tensor(), build_sparse_tensor()]
ret = rpc.rpc_sync(
worker_name(dst_rank), my_container_sum, args=(a,)
)
self.assertEqual(ret, my_container_sum(a))
@dist_init
def test_nested_rpc_sparse(self):
self._nested_rpc(nested_rpc_sparse, build_sparse_tensor() * 2)
@dist_init
def test_stress_heavy_rpc_sparse(self):
self._stress_test_rpc(heavy_rpc_sparse, repeat=20, args=(build_sparse_tensor(),))
@dist_init
def test_builtin_remote_ret_sparse(self):
self._builtin_remote_ret(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 2
)
@dist_init
def test_builtin_remote_self_sparse(self):
self._builtin_remote_self(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 2
)
@dist_init
def test_multi_builtin_remote_ret_sparse(self):
self._test_multi_remote_call(
torch.add, True,
args_fn=RpcTest._multi_args_fn
)
@dist_init
def test_multi_py_udf_remote_sparse(self):
self._test_multi_remote_call(
my_function,
True,
kwargs_fn=RpcTest._multi_kwargs_fn
)
@dist_init
def test_py_rref_args_sparse(self):
self._py_rref_args(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 4
)
@dist_init
def test_py_rref_args_user_share_sparse(self):
self._py_rref_args_user_share(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 6
)
@dist_init
def test_py_rpc_rref_args_sparse(self):
self._py_rpc_rref_args(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 6
)
@dist_init
def test_nested_remote_sparse(self):
self._nested_remote(
nested_remote_sparse,
build_sparse_tensor() + build_sparse_tensor()
)
@dist_init
def test_nested_rref_sparse(self):
self._nested_rref(
nested_rref_sparse,
build_sparse_tensor() * 2,
build_sparse_tensor() * 2
)
@dist_init
def test_nested_rref_stress_sparse(self):
self._nested_rref_stress(
nested_rref_sparse,
build_sparse_tensor() * 2,
build_sparse_tensor() * 2
)
@dist_init
def test_my_parameter_server_sparse(self):
self._my_parameter_server(True)
# Test init_rpc without world_size argument
@dist_init(setup_rpc=False)
def test_dynamic_rpc_init_rpc(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
# Dynamic RPC new ranks communicate with existing ranks
@dist_init(setup_rpc=False)
    def test_dynamic_rpc_new_rank_can_communicate_with_existing_rank(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
if self.rank == 0:
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
rpc_backend_options=self.rpc_backend_options,
)
# Rank 0 will be initialized with RPC after this barrier
dist.barrier()
if self.rank != 0:
# Newly joined ranks will be able to communicate with rank 0, since that was created first
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
rpc_backend_options=self.rpc_backend_options,
)
result = rpc.rpc_sync(worker_name(0), torch.add, args=(torch.tensor(1), torch.tensor(1)))
self.assertEqual(torch.add(torch.tensor(1), torch.tensor(1)), result)
# Barrier to ensure that all rpc_sync calls are finished
dist.barrier()
rpc.shutdown()
# Dynamic RPC existing ranks can communicate with new ranks
@dist_init(setup_rpc=False)
def test_dynamic_rpc_existing_rank_can_communicate_with_new_rank(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
if self.rank == 0:
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
rpc_backend_options=self.rpc_backend_options,
)
# Rank 0 will be initialized with RPC after this barrier
dist.barrier()
# Rest of ranks join after barrier
if self.rank != 0:
# Newly joined ranks will be able to communicate with rank 0, since that was created first
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
rpc_backend_options=self.rpc_backend_options,
)
dist.barrier()
if self.rank == 0:
for i in range(1, self.world_size):
result = rpc.rpc_sync(worker_name(i), torch.add, args=(torch.tensor(1), torch.tensor(1)))
self.assertEqual(torch.add(torch.tensor(1), torch.tensor(1)), result)
# Barrier to ensure that all rpc_sync calls are finished
dist.barrier()
rpc.shutdown()
# Dynamic RPC existing ranks can communicate with new ranks using CUDA rpc
@skip_if_lt_x_gpu(2)
@dist_init(setup_rpc=False)
def test_dynamic_rpc_existing_rank_can_communicate_with_new_rank_cuda(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
if self.rank == 0:
options = self.rpc_backend_options
for i in range(1, self.world_size):
dst = worker_name(i)
options.set_device_map(dst, {1: 0})
options.set_device_map(dst, {0: 1})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
rpc_backend_options=options,
)
# Rank 0 will be initialized with RPC after this barrier
dist.barrier()
# Rest of ranks join after barrier
if self.rank != 0:
# Newly joined ranks will be able to communicate with rank 0, since that was created first
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
rpc_backend_options=self.rpc_backend_options,
)
# TODO: Cuda RPC is failing due to:
# terminate called after throwing an instance of 'c10::Error'
# what(): 0 <= device && static_cast<size_t>(device) < device_allocator.size()
# INTERNAL ASSERT FAILED at "../c10/cuda/CUDACachingAllocator.cpp":1937,
# please report a bug to PyTorch. Allocator not initialized for device 1: did you call init?
# dist.barrier()
# if self.rank == 0:
# for i in range(1, self.world_size):
# x = torch.ones(2)
# result_on_device_0 = rpc.rpc_sync(worker_name(i), torch.add, args=(x.to(0), 1))
# result_on_device_1 = rpc.rpc_sync(worker_name(i), torch.add, args=(x.to(1), 1))
# self.assertEqual(torch.add(torch.ones(2), 1), result_on_device_0)
# self.assertEqual(torch.device('cuda:0'), result_on_device_0.device)
# self.assertEqual(torch.add(torch.ones(2), 1), result_on_device_1)
# self.assertEqual(torch.device('cuda:1'), result_on_device_1.device)
# Barrier to ensure that all rpc_sync calls are finished
dist.barrier()
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_dynamic_rpc_init_rpc_without_rank(self):
# default initialization uses file init
with self.assertRaisesRegex(ValueError, "rank parameter missing"):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rpc_backend_options=self.rpc_backend_options,
)
# env init
with self.assertRaisesRegex(ValueError, "environment variable RANK expected"):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method="env://")
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rpc_backend_options=rpc_backend_options,
)
# tcp init
with self.assertRaisesRegex(ValueError, "rank parameter missing"):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method="tcp://127.0.0.1:23456")
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rpc_backend_options=rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_dynamic_and_static_init_rpc_together(self):
# Initialize a static rpc group with size = self.world_size - 1
dist.init_process_group(
backend='gloo',
init_method=self.file_init_method,
rank=self.rank,
world_size=self.world_size)
world_size_minus_one = self.world_size - 1
if self.rank < world_size_minus_one:
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=world_size_minus_one,
rpc_backend_options=self.rpc_backend_options,
)
dist.barrier()
# Attempt to add an additional dynamic group member
if self.rank == world_size_minus_one:
# Expect error message to be thrown
            with self.assertRaisesRegex(
                RuntimeError,
                "RPC group mixes statically and dynamically "
                "initialized members which is not supported.",
            ):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
rpc_backend_options=self.rpc_backend_options,
)
class TensorPipeAgentCudaRpcTest(RpcAgentTestFixture, RpcTestCommon):
def _test_device_maps(self, options, errMsg):
with self.assertRaisesRegex(ValueError, errMsg):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
self.assertFalse(rpc.api._is_current_rpc_agent_set())
@skip_if_lt_x_gpu(2)
def test_device_maps_wrong_worker_name(self):
options = self.rpc_backend_options
options.set_device_map("none_exist", {0: 1})
self._test_device_maps(
options,
errMsg="Node worker0 has invalid target node names in its device maps"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_local_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {torch.cuda.device_count(): 0})
self._test_device_maps(
options,
errMsg="Node worker0 has source devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_remote_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: torch.cuda.device_count()})
self._test_device_maps(
options,
errMsg="Node worker0 has target devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_many_to_one(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
options.set_device_map(dst, {0: 0})
self._test_device_maps(
options,
errMsg="Node worker0 has duplicated target devices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_one_to_many(self):
if self.rank == 0:
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1})
with self.assertRaisesRegex(
ValueError, "`set_device_map` only supports 1-to-1 mapping"
):
options.set_device_map(dst, {0: 0})
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_min_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {-1: 0})
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {0: -1})
@staticmethod
def _gpu_add(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 1]):
return (x + y).to(0)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_gpu(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1, 1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add,
args=(torch.zeros(2).to(0), torch.ones(2).to(0))
)
self.assertEqual(ret.device, torch.device(1))
self.assertEqual(ret, (torch.zeros(2) + torch.ones(2)).to(1))
rpc.shutdown()
@staticmethod
def _gpu_add_given_devices(x, y, x_to, y_to, z_to):
x_device = "cpu" if x.device.type == "cpu" else x.device.index
y_device = "cpu" if y.device.type == "cpu" else y.device.index
if x_device == x_to and y_device == y_to:
return x.to(z_to) + y.to(z_to)
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_gpu(self, x_from, y_from, z_to, device_map, dst=None, fn=None):
fn = TensorPipeAgentCudaRpcTest._gpu_add_given_devices if fn is None else fn
x_to = device_map[x_from]
y_to = device_map[y_from]
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size) if dst is None else dst
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(x_from)
y = torch.ones(2).to(y_from)
ret = rpc.rpc_sync(dst, fn, args=(x, y, x_to, y_to, z_to))
reverse_device_map = {device_map[k] : k for k in device_map}
z_from = reverse_device_map[z_to]
ret_device = "cpu" if ret.device.type == "cpu" else ret.device.index
self.assertEqual(ret_device, z_from)
self.assertEqual(ret, torch.ones(2).to(z_from))
rpc.shutdown()
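    # A brief note on the semantics exercised by the helper above (a reading of
    # the test logic, not an exhaustive spec): options.set_device_map(dst, {a: b})
    # places tensors sent from the caller's device `a` onto device `b` on `dst`,
    # and the response path applies the inverse mapping, which is why a result
    # produced on the callee's `z_to` is expected back on the caller's
    # `reverse_device_map[z_to]`.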
def test_device_map_cpu(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to="cpu",
device_map={"cpu" : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_cpu_to_gpu_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=0,
device_map={"cpu" : 0},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_cpu_to_gpu_non_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=1,
device_map={"cpu" : 1},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_gpu_to_cpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to="cpu",
device_map={0 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_to_cpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to="cpu",
device_map={1 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=0,
device_map={0 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=1,
device_map={1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default_to_non_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=1,
device_map={0 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default_to_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=0,
device_map={1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@staticmethod
def _gpu_add_multi_gpu(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 0]):
return x.to(0) + y, x - y.to(1)
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_multi_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(1)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(x, y)
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_multi_gpu(dst)
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_multi_gpu(dst)
@staticmethod
def _gpu_add_return_to_gpu(x, y):
if x.device.type == 'cpu' and y.device.type == 'cpu':
return (x + y).to(0), (x - y).to(1), (x * y).to(2), (x / y).to(3)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_in_options(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
init_method=options.init_method,
num_worker_threads=options.num_worker_threads,
device_maps={dst: {0: 1, 1: 0}},
_transports=tp_transports()
)
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(torch.zeros(2).to(0), torch.ones(2).to(1))
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
def _test_device_maps_return_to_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 2})
options.set_device_map(dst, {2: 3})
options.set_device_map(dst, {3: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_return_to_gpu,
args=(torch.zeros(2), torch.ones(2))
)
for i in range(len(rets)):
self.assertEqual(rets[i].device, torch.device((3 + i) % 4))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(3))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
self.assertEqual(rets[2], (torch.zeros(2) * torch.ones(2)).to(1))
self.assertEqual(rets[3], (torch.zeros(2) / torch.ones(2)).to(2))
rpc.shutdown()
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_return_to_gpu(dst)
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_return_to_gpu(dst)
@staticmethod
def _add_to_gpu(x, y):
return (x + y).to(0)
def _test_device_maps_missing_config(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = (
"TensorPipe RPC backend only supports CPU tensors by default.*"
"`set_device_map` on `TensorPipeRpcBackendOptions`"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(dst, torch.add, args=(torch.zeros(2).to(0), 1))
elif mode == RPCExecMode.REMOTE:
rpc.remote(dst, torch.add, args=(torch.zeros(2).to(0), 1)).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
def _test_device_maps_missing_config_response(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = "Response device mapping is not available"
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
elif mode == RPCExecMode.REMOTE:
rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config(self):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
def test_device_maps_missing_config_not_timeout(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options
)
timeout = rpc.get_rpc_timeout()
tik = time.time()
self._test_device_maps_missing_config(RPCExecMode.SYNC)
rpc.shutdown()
tok = time.time()
self.assertTrue(tok - tik < timeout)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote(self):
self._test_device_maps_missing_config(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(2)
def test_device_maps_remote(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
self.assertEqual(rref.to_here().device.index, 1)
self.assertEqual(rref.to_here(), torch.ones(2).to(1))
rpc.shutdown()
@staticmethod
def _slow_add_on_user_stream(x, y):
s0 = torch.cuda.current_stream(x.device)
s1 = torch.cuda.Stream(device=x.device)
s1.wait_stream(s0)
x.record_stream(s1)
y.record_stream(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
z = x + y
s0.wait_stream(s1)
z.record_stream(s0)
return z
def _test_custom_stream(self, fn, device_map):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
fn(dst)
rpc.shutdown()
def _test_stream_sync(self, dst):
x = torch.ones(2, 2).to(0)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
self.assertEqual(ret, 2 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream(self):
self._test_custom_stream(self._test_stream_sync, {"cuda:0": "cuda:1"})
def _test_stream_multi_async(self, dst):
futs = []
for i in range(20):
x = torch.ones(2, 2).to(0) * i
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
)
for i in range(20):
self.assertEqual(futs[i].wait(), 2 * torch.ones(2, 2).to(0) * i)
@skip_if_lt_x_gpu(2)
def test_custom_stream_multi(self):
self._test_custom_stream(
self._test_stream_multi_async,
{"cuda:0": "cuda:1"}
)
@staticmethod
def _nested_slow_add_on_user_stream(dst, x, y, z):
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, y)
)
return TensorPipeAgentCudaRpcTest._slow_add_on_user_stream(ret, z)
def _test_stream_nested_sync(self, dst):
x = torch.ones(2, 2).to(0)
y = torch.ones(2, 2).to(0) * 2
z = torch.ones(2, 2).to(0) * 3
nested_dst = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
self.assertEqual(ret, 6 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested(self):
self._test_custom_stream(
self._test_stream_nested_sync,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
def _test_stream_nested_multi_async(self, dst):
if self.rank == 0:
futs = []
n = 5
xs, ys, zs = [], [], []
for i in range(n):
x = torch.ones(2, 2).to(0) * (i - 1)
y = torch.ones(2, 2).to(0) * i
z = torch.ones(2, 2).to(0) * (i + 1)
xs.append(x)
ys.append(y)
zs.append(z)
nested_dst = worker_name((self.rank + 2) % self.world_size)
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
)
for i in range(n):
self.assertEqual(futs[i].wait(), xs[i] + ys[i] + zs[i])
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested_multi(self):
self._test_custom_stream(
self._test_stream_nested_multi_async,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
@staticmethod
def _gpu_add_wrong_gpus(x, y):
if x.is_cuda and y.is_cuda:
return x.cpu() + y.cuda()
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(1)
def test_device_mismatch(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(0)
with self.assertRaisesRegex(
RuntimeError,
"Expected all tensors to be on the same device, but found at least two devices"
):
rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_wrong_gpus,
args=(x, y)
)
rpc.shutdown()
def _test_rref_synchronization(self, local_device, remote_device):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device : remote_device})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
x = torch.randn(200, 1, 28, 28).to(local_device)
actual = rref.remote().forward(x).to_here()
expected = rref.rpc_sync().forward(x)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_to_here_synchronization1(self):
self._test_rref_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization2(self):
self._test_rref_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization3(self):
self._test_rref_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization4(self):
self._test_rref_synchronization("cuda:0", "cuda:1")
def _test_rref_as_arg_synchronization(
self,
local_device,
remote_device,
devicesOptions=None
):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device: remote_device})
input_src = worker_name((self.rank - 1 + self.world_size) % self.world_size)
options.set_device_map(input_src, {remote_device: local_device})
if devicesOptions is not None:
options.set_devices(devicesOptions[self.rank])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_x = RRef(torch.randn(200, 1, 28, 28).to(local_device))
actual = rref.remote().forward(rref_x, True).to_here()
expected = rref.rpc_sync().forward(rref_x, True)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization1(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization2(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization3(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization4(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization5(self):
self._test_rref_as_arg_synchronization(
"cuda:0",
"cuda:0",
[["cuda:0"] for _ in range(4)], # devicesOptions
)
@staticmethod
def _rref_relay(rref):
return rref.to_here()
def _test_rref_forward_synchronization(self, local_device, remote_device):
options = self.rpc_backend_options
input_src = worker_name(0)
model_dst = worker_name(1)
out_relay = worker_name(2)
if self.rank == 0:
# for 1) model construction 2) forward execution
options.set_device_map(model_dst, {local_device: remote_device})
# Forward output will be first copied to the relay node before
# returning to the worker. This is intentional, to test RRef
# forward CUDA stream synchronizations.
options.set_device_map(out_relay, {local_device: local_device})
elif self.rank == 1:
            # worker1 hosts the model and runs forward. The forward function
            # calls RRef.to_here(), hence it needs to configure the device map.
options.set_device_map(input_src, {remote_device: local_device})
elif self.rank == 2:
            # worker2 will get the output RRef and call to_here(), hence it needs
            # to configure the device map.
options.set_device_map(model_dst, {local_device: remote_device})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 0:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(model_dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_input = RRef(torch.randn(200, 1, 28, 28).to(local_device))
rref_out = rref.remote().forward(rref_input, True)
out = rpc.remote(
out_relay,
TensorPipeAgentCudaRpcTest._rref_relay,
args=(rref_out,)
).to_here()
expected = rref.rpc_sync().forward(rref_input, True)
self.assertEqual(out, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_forward_synchronization1(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization2(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization3(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization4(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:1")
def _test_owner_rref_forward_synchronization(self, local_device, remote_device):
if self.rank == 0:
options = self.rpc_backend_options
options.set_device_map("w0", {local_device: remote_device})
rpc.init_rpc(
"w0",
rank=0,
world_size=1,
rpc_backend_options=options
)
model = rpc.remote(
"w0", torch.nn.Linear, (2048, 20000)
).remote().to(remote_device)
for _ in range(30):
data = torch.rand(2048, 2048).to(local_device)
output = model.rpc_sync().forward(data)
# to_here() internally calls localValue as the caller is
# the owner of the RRef.
v0 = rpc.RRef(output).remote().sum().to_here().item()
v1 = output.sum().item()
self.assertEqual(v0, v1)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_owner_rref_forward_synchronization1(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization2(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization3(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization4(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:1")
@staticmethod
def _return_tensor_view(i):
x = torch.ones(1000, 200).cuda(0) * i
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
# serialization of the return value will create a new tensor from the
# view, which is done outside of the user function.
return x.split(100)[0]
@skip_if_lt_x_gpu(1)
def test_tensor_view_as_return_value(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0 : 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
futs = [
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._return_tensor_view,
args=(i,)
) for i in range(5)
]
for i in range(5):
self.assertEqual(torch.ones(100, 200) * i, futs[i].wait())
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch(self):
with self.assertRaisesRegex(
ValueError,
"Node worker0 has unexpected source devices in its device map for worker1"
):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0 : 0})
options.set_devices([1])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch_reverse(self):
with self.assertRaisesRegex(
ValueError,
"Node worker0 has unexpected target devices in its device map for worker1"
):
dst = worker_name((self.rank + 1) % self.world_size)
options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
device_maps={dst: {0 : 1}},
devices=[0]
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_int(self):
Future(devices=[0])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_str(self):
Future(devices=["cuda:0"])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_device(self):
Future(devices=[torch.device("cuda", 0)])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_not_cuda(self):
with self.assertRaisesRegex(
ValueError, "Expected devices to have indices, got cpu"
):
Future(devices=["cpu"])
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=operator.itemgetter(0), sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=TensorWrapper, unwrapper=lambda v: v.tensor, sparse_tensor=False
)
@skip_if_lt_x_gpu(2)
def test_cuda_future_callback_changes_devices(self):
# We check proper CUDA stream synchronization by filling the tensor with
# the expected value in one stream, and reading it from another stream.
tensor0 = torch.zeros((100,), device="cuda:0")
tensor1 = torch.zeros((100,), device="cuda:1")
parent_future = Future(devices=["cuda:0", "cuda:1"])
def cb(fut):
t0 = fut.value()
tensor1.copy_(t0, non_blocking=True)
return tensor1
child_future = parent_future.then(cb)
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor0.fill_(1)
parent_future.set_result(tensor0)
with torch.cuda.device("cuda:1"):
another_stream = torch.cuda.Stream()
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(2)
def test_cuda_future_value_on_bad_device(self):
tensor0 = torch.zeros((100,), device="cuda:0")
tensor1 = torch.zeros((100,), device="cuda:1")
parent_future = Future(devices=["cuda:1"])
# As a plus, we test that futures still invoke callbacks even in case of
# error, and that the child futures are successful if those callbacks
# don't access the parent future.
def cb(fut):
with torch.cuda.device("cuda:1"):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor1.fill_(1)
return tensor1
child_future = parent_future.then(cb)
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor0.fill_(1)
parent_future.set_result(tensor0)
with self.assertRaisesRegex(
ValueError,
r"The result contained tensors residing on device\(s\) cuda:0 "
r"which are not among the expected device\(s\) cuda:1",
):
parent_future.wait()
with torch.cuda.device("cuda:1"):
another_stream = torch.cuda.Stream()
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(1)
def test_async_execution_with_cuda_future(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
t = torch.zeros((100,), device="cuda:0")
fut = rpc.rpc_async(dst, async_cuda_sleep_and_set_to_one, args=(t,))
another_stream = torch.cuda.Stream("cuda:0")
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(fut.wait(), 1).all().item())
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_async_execution_nested_with_cuda_future(self):
dst = worker_name((self.rank + 1) % self.world_size)
nested_dst = worker_name((self.rank + 2) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
a = torch.ones((100,), device="cuda:0")
b = torch.ones((100,), device="cuda:0")
c = torch.ones((100,), device="cuda:0")
fut = rpc.rpc_async(dst, async_cuda_nested_add, args=(nested_dst, a, b, c))
another_stream = torch.cuda.Stream("cuda:0")
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(fut.wait(), 3).all().item())
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_modify_tensor_inplace(self):
tensor = torch.zeros((100,), device="cuda:0")
future = Future(devices=["cuda:0"])
future.set_result(tensor)
# It's weird to modify the value of a future once it's complete, but
# technically possible. Currently this is considered undefined behavior
# (in practice the future will ignore the modification and still
# synchronize with the original value). We could one day add logic to
# detect and warn or throw in such cases, but for now we just check that
# this doesn't crash.
tensor.fill_(1)
future.wait()
@skip_if_lt_x_gpu(1)
def test_cuda_future_replace_tensor(self):
tensor_list = [torch.zeros((100,), device="cuda:0")]
future = Future(devices=["cuda:0"])
future.set_result(tensor_list)
# It's weird to modify the value of a future once it's complete, but
# technically possible. Currently this is considered undefined behavior
# (in practice the future will ignore the modification and still
# synchronize with the original value). We could one day add logic to
# detect and warn or throw in such cases, but for now we just check that
# this doesn't crash.
# We set things up so that the original tensor contained in the list
# gets deleted once we replace it with the other one. This will
# invalidate any cached information held by the future.
tensor_list[0] = torch.ones((100,), device="cuda:0")
future.wait()
@skip_if_lt_x_gpu(1)
def test_rref_with_unpickleable_attributes(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(dst, TensorWrapper, args=(torch.zeros(42, device="cuda:0"),))
rref.rpc_sync().increase(1)
ret = rref.rpc_sync().sum()
self.assertEqual(ret, 42)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=operator.itemgetter(0), sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=TensorWrapper, unwrapper=lambda v: v.tensor, sparse_tensor=True
)
```
|
====================================================================================================================================================================
SOURCE CODE FILE: tensorpipe_rpc_agent_test_fixture.py
LINES: 1
SIZE: 1.04 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\rpc\tensorpipe_rpc_agent_test_fixture.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import torch.distributed.rpc as rpc
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_distributed import (
tp_transports,
)
class TensorPipeRpcAgentTestFixture(RpcAgentTestFixture):
@property
def rpc_backend(self):
return rpc.backend_registry.BackendType[
"TENSORPIPE"
]
@property
def rpc_backend_options(self):
return rpc.backend_registry.construct_rpc_backend_options(
self.rpc_backend,
init_method=self.init_method,
_transports=tp_transports()
)
def get_shutdown_error_regex(self):
# FIXME Once we consolidate the error messages returned by the
# TensorPipe agent put some more specific regex here.
error_regexes = [".*"]
return "|".join([f"({error_str})" for error_str in error_regexes])
def get_timeout_error_regex(self):
return "RPC ran for more than"
```
|
========================================================================================================================================
SOURCE CODE FILE: rpc_utils.py
LINES: 1
SIZE: 6.59 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\rpc_utils.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import os
import sys
import unittest
from torch.testing._internal.common_distributed import MultiProcessTestCase
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
find_free_port,
IS_SANDCASTLE,
)
from torch.testing._internal.distributed.ddp_under_dist_autograd_test import (
CudaDdpComparisonTest,
DdpComparisonTest,
DdpUnderDistAutogradTest,
)
from torch.testing._internal.distributed.nn.api.remote_module_test import (
CudaRemoteModuleTest,
RemoteModuleTest,
ThreeWorkersRemoteModuleTest,
)
from torch.testing._internal.distributed.rpc.dist_autograd_test import (
DistAutogradTest,
CudaDistAutogradTest,
FaultyAgentDistAutogradTest,
TensorPipeAgentDistAutogradTest,
TensorPipeCudaDistAutogradTest
)
from torch.testing._internal.distributed.rpc.dist_optimizer_test import (
DistOptimizerTest,
)
from torch.testing._internal.distributed.rpc.jit.dist_autograd_test import (
JitDistAutogradTest,
)
from torch.testing._internal.distributed.rpc.jit.rpc_test import JitRpcTest
from torch.testing._internal.distributed.rpc.jit.rpc_test_faulty import (
JitFaultyAgentRpcTest,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.distributed.rpc.faulty_agent_rpc_test import (
FaultyAgentRpcTest,
)
from torch.testing._internal.distributed.rpc.rpc_test import (
CudaRpcTest,
RpcTest,
TensorPipeAgentRpcTest,
TensorPipeAgentCudaRpcTest,
)
from torch.testing._internal.distributed.rpc.examples.parameter_server_test import ParameterServerTest
from torch.testing._internal.distributed.rpc.examples.reinforcement_learning_rpc_test import (
ReinforcementLearningRpcTest,
)
def _check_and_set_tcp_init():
    # if we are running with TCP init, set the master address and port
# before spawning subprocesses, since different processes could find
# different ports.
use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None)
if use_tcp_init == "1":
os.environ["MASTER_ADDR"] = '127.0.0.1'
os.environ["MASTER_PORT"] = str(find_free_port())
def _check_and_unset_tcp_init():
use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None)
if use_tcp_init == "1":
del os.environ["MASTER_ADDR"]
del os.environ["MASTER_PORT"]
# The tests for the RPC module need to cover multiple possible combinations:
# - different aspects of the API, each one having its own suite of tests;
# - different agents (ProcessGroup, TensorPipe, ...);
# To avoid a combinatorial explosion in code size, and to prevent forgetting to
# add a combination, these are generated automatically by the code in this file.
# Here, we collect all the test suites that we need to cover.
# We then have one separate file for each agent, from which
# we call the generate_tests function of this file, passing to it a fixture for
# the agent, which then gets mixed-in with each test suite.
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN, "Skip ASAN as torch + multiprocessing spawn have known issues"
)
class SpawnHelper(MultiProcessTestCase):
def setUp(self):
super().setUp()
_check_and_set_tcp_init()
self._spawn_processes()
def tearDown(self):
_check_and_unset_tcp_init()
super().tearDown()
# This list contains test suites that are agent-agnostic and that only verify
# compliance with the generic RPC interface specification. These tests should
# *not* make use of implementation details of a specific agent (options,
# attributes, ...). These test suites will be instantiated multiple times, once
# for each agent (except the faulty agent, which is special).
GENERIC_TESTS = [
RpcTest,
ParameterServerTest,
DistAutogradTest,
DistOptimizerTest,
JitRpcTest,
JitDistAutogradTest,
RemoteModuleTest,
ThreeWorkersRemoteModuleTest,
DdpUnderDistAutogradTest,
DdpComparisonTest,
ReinforcementLearningRpcTest,
]
GENERIC_CUDA_TESTS = [
CudaRpcTest,
CudaDistAutogradTest,
CudaRemoteModuleTest,
CudaDdpComparisonTest,
]
# This list contains test suites that will only be run on the TensorPipeAgent.
# These suites should be standalone, and separate from the ones in the generic
# list (not subclasses of those!).
TENSORPIPE_TESTS = [
TensorPipeAgentRpcTest,
TensorPipeAgentDistAutogradTest,
]
TENSORPIPE_CUDA_TESTS = [
TensorPipeAgentCudaRpcTest,
TensorPipeCudaDistAutogradTest,
]
# This list contains test suites that will only be run on the faulty RPC agent.
# That agent is special as it's only used to perform fault injection in order to
# verify the error handling behavior. Thus the faulty agent will only run the
# suites in this list, which were designed to test such behaviors, and not the
# ones in the generic list.
FAULTY_AGENT_TESTS = [
FaultyAgentRpcTest,
FaultyAgentDistAutogradTest,
JitFaultyAgentRpcTest,
]
def generate_tests(
prefix: str,
mixin: type[RpcAgentTestFixture],
tests: list[type[RpcAgentTestFixture]],
module_name: str,
) -> dict[str, type[RpcAgentTestFixture]]:
"""Mix in the classes needed to autogenerate the tests based on the params.
    Takes a series of test suites, each written against a "generic" agent (i.e.,
    derived from the abstract RpcAgentTestFixture class), as the `tests` arg.
    Takes a concrete subclass of RpcAgentTestFixture, which specializes it for a
    certain agent, as the `mixin` arg. Produces all combinations of them.
    Returns a dictionary mapping class names to class type objects, which can be
    inserted into the global namespace of the calling module. The name of each
    test will be a concatenation of the `prefix` arg and the original name of
    the test suite.
    The `module_name` should be the name of the calling module, so that the
    classes can be fixed up to look like they belong to it, which is necessary
    for pickling to work on them.
    """
ret: dict[str, type[RpcAgentTestFixture]] = {}
for test_class in tests:
if IS_SANDCASTLE and TEST_WITH_DEV_DBG_ASAN:
print(
f'Skipping test {test_class} on sandcastle for the following reason: '
'Skip dev-asan as torch + multiprocessing spawn have known issues', file=sys.stderr)
continue
name = f"{prefix}{test_class.__name__}"
class_ = type(name, (test_class, mixin, SpawnHelper), {})
class_.__module__ = module_name
ret[name] = class_
return ret
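# A minimal usage sketch (the prefix and combination below are illustrative): a
# per-agent test module would splice the generated classes into its globals so
# that unittest discovery picks them up, e.g.
#
#   globals().update(
#       generate_tests(
#           "TensorPipe",
#           TensorPipeRpcAgentTestFixture,
#           GENERIC_TESTS + TENSORPIPE_TESTS,
#           __name__,
#       )
#   )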
```
|
=======================================================================================================================================
SOURCE CODE FILE: dynamo_test_failures.py
LINES: 1
SIZE: 5.15 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\dynamo_test_failures.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import logging
import os
import sys
# NOTE: [dynamo_test_failures.py]
#
# We generate xFailIfTorchDynamo* for all tests in `dynamo_expected_failures`
# We generate skipIfTorchDynamo* for all tests in `dynamo_skips`
#
# For an easier-than-manual way of generating and updating these lists,
# see scripts/compile_tests/update_failures.py
#
# If you're adding a new test and it fails under PYTORCH_TEST_WITH_DYNAMO=1,
# either add the appropriate decorators to your test or add a skip for it
# via test/dynamo_skips and test/dynamo_expected_failures.
#
# *These are not exactly unittest.expectedFailure and unittest.skip. We'll
# always execute the test and then suppress the signal, if necessary.
# If your test crashes or is slow, please use @skipIfTorchDynamo instead.
#
# The expected failure and skip files are located in test/dynamo_skips and
# test/dynamo_expected_failures. They're individual files rather than a list so
# git will merge changes more easily.
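# For example (file name below is hypothetical), marking a test as an expected
# dynamo failure amounts to creating an empty file named "<TestClass>.<test_name>"
# under test/dynamo_expected_failures:
#
#   test/dynamo_expected_failures/TestFooCPU.test_bar_cpu_float32
#
# The invariant check at the bottom of this file requires exactly one dot in
# each such name.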
def find_test_dir():
# Find the path to the dynamo expected failure and skip files.
from os.path import abspath, basename, dirname, exists, join, normpath
if sys.platform == "win32":
return None
# Check relative to this file (local build):
test_dir = normpath(join(dirname(abspath(__file__)), "../../../test"))
if exists(join(test_dir, "dynamo_expected_failures")):
return test_dir
# Check relative to __main__ (installed builds relative to test file):
main = sys.modules["__main__"]
file = getattr(main, "__file__", None)
if file is None:
# Generated files do not have a module.__file__
return None
test_dir = dirname(abspath(file))
while dirname(test_dir) != test_dir:
if basename(test_dir) == "test" and exists(
join(test_dir, "dynamo_expected_failures")
):
return test_dir
test_dir = dirname(test_dir)
# Not found
return None
test_dir = find_test_dir()
if not test_dir:
logger = logging.getLogger(__name__)
logger.warning(
"test/dynamo_expected_failures directory not found - known dynamo errors won't be skipped."
)
# Tests that run without strict mode in PYTORCH_TEST_WITH_INDUCTOR=1.
# Please don't add anything to this list.
FIXME_inductor_non_strict = {
"test_modules",
"test_ops",
"test_ops_gradients",
"test_torch",
}
# We generate unittest.expectedFailure for all of the following tests
# when run under PYTORCH_TEST_WITH_DYNAMO=1.
# see NOTE [dynamo_test_failures.py] for more details
#
# This list exists so we can more easily add large numbers of failing tests.
if test_dir is None:
dynamo_expected_failures = set()
dynamo_skips = set()
inductor_expected_failures = set()
inductor_skips = set()
else:
dynamo_failures_directory = os.path.join(test_dir, "dynamo_expected_failures")
dynamo_skips_directory = os.path.join(test_dir, "dynamo_skips")
dynamo_expected_failures = set(os.listdir(dynamo_failures_directory))
dynamo_skips = set(os.listdir(dynamo_skips_directory))
inductor_failures_directory = os.path.join(test_dir, "inductor_expected_failures")
inductor_skips_directory = os.path.join(test_dir, "inductor_skips")
inductor_expected_failures = set(os.listdir(inductor_failures_directory))
inductor_skips = set(os.listdir(inductor_skips_directory))
# TODO: due to case-sensitivity problems, list these files by hand for now
extra_dynamo_skips = {
"TestProxyTensorOpInfoCPU.test_make_fx_exhaustive_T_cpu_float32",
"TestProxyTensorOpInfoCPU.test_make_fx_exhaustive_t_cpu_float32",
"TestProxyTensorOpInfoCPU.test_make_fx_fake_exhaustive_T_cpu_float32",
"TestProxyTensorOpInfoCPU.test_make_fx_fake_exhaustive_t_cpu_float32",
"TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_T_cpu_float32",
"TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_t_cpu_float32",
"TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_inplace_T_cpu_float32",
"TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_inplace_t_cpu_float32",
"TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_out_T_cpu_float32",
"TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_out_t_cpu_float32",
}
dynamo_skips = dynamo_skips.union(extra_dynamo_skips)
# verify some invariants
for test in (
dynamo_expected_failures
| dynamo_skips
| inductor_expected_failures
| inductor_skips
):
if len(test.split(".")) != 2:
raise AssertionError(f'Invalid test name: "{test}"')
dynamo_intersection = dynamo_expected_failures.intersection(dynamo_skips)
if len(dynamo_intersection) > 0:
raise AssertionError(
"there should be no overlap between dynamo_expected_failures "
"and dynamo_skips, got " + str(dynamo_intersection)
)
inductor_intersection = inductor_expected_failures.intersection(inductor_skips)
if len(inductor_intersection) > 0:
raise AssertionError(
"there should be no overlap between inductor_expected_failures "
"and inductor_skips, got " + str(inductor_intersection)
)
```
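The sets above are keyed by `TestClass.test_name`. As a rough illustration of how such sets can drive test decoration, here is a minimal sketch; the helper name is hypothetical, and the real harness re-runs the test and suppresses the failure signal rather than using plain `unittest` decorators.
```py
# Hypothetical helper showing how "TestClass.test_name" keys from
# dynamo_expected_failures / dynamo_skips could be consumed. The real
# framework suppresses the failure signal after running the test; plain
# unittest decorators are used here only to keep the sketch short.
import unittest

from torch.testing._internal.dynamo_test_failures import (
    dynamo_expected_failures,
    dynamo_skips,
)


def mark_for_dynamo(cls_name: str, test_name: str, fn):
    """Wrap `fn` according to the dynamo failure/skip lists (illustrative)."""
    key = f"{cls_name}.{test_name}"
    if key in dynamo_expected_failures:
        return unittest.expectedFailure(fn)
    if key in dynamo_skips:
        return unittest.skip("listed in dynamo_skips")(fn)
    return fn
```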
|
=====================================================================================================================================
SOURCE CODE FILE: fake_config_module.py
LINES: 1
SIZE: 1.27 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\fake_config_module.py
ENCODING: utf-8
```py
import sys
from typing import Optional
from torch.utils._config_module import Config, install_config_module
e_bool = True
e_int = 1
e_float = 1.0
e_string = "string"
e_list = [1]
e_set = {1}
e_tuple = (1,)
e_dict = {1: 2}
e_none: Optional[bool] = None
e_optional: Optional[bool] = True
e_ignored = True
_e_ignored = True
magic_cache_config_ignored = True
# [@compile_ignored: debug]
e_compile_ignored = True
e_config: bool = Config(default=True)
e_jk: bool = Config(justknob="does_not_exist", default=True)
e_jk_false: bool = Config(justknob="does_not_exist", default=False)
e_env_default: bool = Config(env_name_default="ENV_TRUE", default=False)
e_env_default_FALSE: bool = Config(env_name_default="ENV_FALSE", default=True)
e_env_default_str: bool = Config(env_name_default="ENV_STR", default="default")
e_env_default_str_empty: bool = Config(
env_name_default="ENV_STR_EMPTY", default="default"
)
e_env_force: bool = Config(env_name_force="ENV_TRUE", default=False)
e_aliased_bool: bool = Config(
alias="torch.testing._internal.fake_config_module2.e_aliasing_bool"
)
class nested:
e_bool = True
_cache_config_ignore_prefix = ["magic_cache_config"]
_save_config_ignore = ["e_ignored"]
install_config_module(sys.modules[__name__])
```
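Once `install_config_module` runs, the module is replaced by a config object whose entries behave like ordinary module globals. A minimal usage sketch, assuming the fixture module above is importable:
```py
# Minimal sketch of interacting with the installed fixture config above.
# Attribute access reads the current value; assignment overrides it at runtime.
import torch.testing._internal.fake_config_module as fake_cfg

assert fake_cfg.e_bool is True          # default declared above
fake_cfg.e_bool = False                 # runtime override
assert fake_cfg.e_bool is False
assert fake_cfg.nested.e_bool is True   # nested config class is reachable too
```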
|
======================================================================================================================================
SOURCE CODE FILE: fake_config_module2.py
LINES: 1
SIZE: 0.35 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\fake_config_module2.py
ENCODING: utf-8
```py
import sys
from torch.utils._config_module import Config, install_config_module
e_aliasing_bool = False
e_env_default_multi: bool = Config(
env_name_default=["ENV_TRUE", "ENV_FALSE"], default=False
)
e_env_force_multi: bool = Config(env_name_force=["ENV_FAKE", "ENV_TRUE"], default=False)
install_config_module(sys.modules[__name__])
```
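`e_aliasing_bool` here is the target of the `alias=` entry in fake_config_module.py, and the `*_multi` entries exercise lists of environment-variable names. A hedged sketch of the aliasing relationship, assuming reads of the aliased config resolve to this module's value:
```py
# Hedged sketch: fake_config_module.e_aliased_bool is declared as an alias of
# fake_config_module2.e_aliasing_bool, so the two are expected to agree.
import torch.testing._internal.fake_config_module as cfg1
import torch.testing._internal.fake_config_module2 as cfg2

print(cfg1.e_aliased_bool, cfg2.e_aliasing_bool)  # expected: False False
```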
|
======================================================================================================================================
SOURCE CODE FILE: fake_config_module3.py
LINES: 1
SIZE: 0.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\fake_config_module3.py
ENCODING: utf-8
```py
import sys
from typing import Callable, Optional
from torch.utils._config_module import install_config_module
e_list = [1]
e_set = {1}
e_func: Optional[Callable] = None
install_config_module(sys.modules[__name__])
```
|
=====================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\generated\__init__.py
ENCODING: utf-8
```py
```
|
==============================================================================================================================================
SOURCE CODE FILE: annotated_fn_args.py
LINES: 1
SIZE: 536.32 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\generated\annotated_fn_args.py
ENCODING: utf-8
```py
"""
This file is needed for generating procedural tests required for
testing __torch_function__. See tests/test_overrides.py.
"""
# flake8: noqa
import torch
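# Each entry maps an overridable torch function to a list of dicts describing
# its required (non-defaulted) arguments: 'name', 'simple_type', whether the
# argument is keyword-only, and occasionally a 'size' hint. Overloaded
# signatures appear as repeated keys; since this is a plain dict literal, only
# the last listed overload survives at runtime.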
annotated_args = {
torch._C._VariableFunctions._cast_Byte: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._cast_Char: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._cast_Double: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._cast_Float: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._cast_Int: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._cast_Long: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._cast_Short: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._cast_Half: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._make_dual: [{'is_kwarg_only': 'False', 'name': 'primal', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tangent', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._unpack_dual: [{'is_kwarg_only': 'False', 'name': 'dual', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.align_tensors: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._assert_async: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._assert_async: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'assert_msg', 'simple_type': 'c10::string_view'}],
torch._C._VariableFunctions._assert_scalar: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'assert_msg', 'simple_type': 'c10::string_view'}],
torch._C._VariableFunctions._functional_assert_scalar: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'assert_msg', 'simple_type': 'c10::string_view'}, {'is_kwarg_only': 'False', 'name': 'dep_token', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._functional_assert_async: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'assert_msg', 'simple_type': 'c10::string_view'}, {'is_kwarg_only': 'False', 'name': 'dep_token', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._assert_tensor_metadata: [{'is_kwarg_only': 'False', 'name': 'a', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._print: [{'is_kwarg_only': 'False', 'name': 's', 'simple_type': 'c10::string_view'}],
torch._C._VariableFunctions.sym_constrain_range: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.sym_constrain_range_for_size: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._functional_sym_constrain_range: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'int64_t?'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'int64_t?'}, {'is_kwarg_only': 'False', 'name': 'dep_token', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._functional_sym_constrain_range_for_size: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'int64_t?'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'int64_t?'}, {'is_kwarg_only': 'False', 'name': 'dep_token', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._make_dep_token: [],
torch._C._VariableFunctions._use_cudnn_ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'blank', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._use_cudnn_ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'blank', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._cudnn_ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'blank', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'zero_infinity', 'simple_type': 'bool'}],
torch._C._VariableFunctions._cudnn_ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'blank', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'zero_infinity', 'simple_type': 'bool'}],
torch._C._VariableFunctions._use_cudnn_rnn_flatten_weight: [],
torch._C._VariableFunctions._cudnn_rnn_flatten_weight: [{'is_kwarg_only': 'False', 'name': 'weight_arr', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weight_stride0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'input_size', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'hidden_size', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'proj_size', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}],
torch._C._VariableFunctions._cudnn_rnn: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weight_stride0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'weight_buf', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cx', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'hidden_size', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'proj_size', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dropout_state', 'simple_type': 'Tensor?'}],
torch._C._VariableFunctions._cudnn_init_dropout_state: [{'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'dropout_seed', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._debug_has_internal_overlap: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._fused_dropout: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}],
torch._C._VariableFunctions._masked_scale: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'double'}],
torch._C._VariableFunctions.native_dropout: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool?'}],
torch._C._VariableFunctions._sobol_engine_draw: [{'is_kwarg_only': 'False', 'name': 'quasi', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'sobolstate', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_generated', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType?'}],
torch._C._VariableFunctions._sobol_engine_ff_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'sobolstate', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_generated', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._sobol_engine_scramble_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ltm', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._sobol_engine_initialize_state_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._reshape_from_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'shape', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._shape_as_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.dropout: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}],
torch._C._VariableFunctions.dropout_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}],
torch._C._VariableFunctions.feature_dropout: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}],
torch._C._VariableFunctions.feature_dropout_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}],
torch._C._VariableFunctions.alpha_dropout: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}],
torch._C._VariableFunctions.alpha_dropout_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}],
torch._C._VariableFunctions.feature_alpha_dropout: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}],
torch._C._VariableFunctions.feature_alpha_dropout_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}],
torch._C._VariableFunctions.abs: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.abs: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.abs_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.absolute: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.absolute: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.angle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.angle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.view_as_real: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.view_as_complex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sgn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sgn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.real: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.imag: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._conj_physical: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.conj_physical: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.conj_physical: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.conj_physical_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.resolve_conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.resolve_neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._neg_view: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.acos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.acos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.acos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arccos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arccos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arccos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.avg_pool1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 1}],
torch._C._VariableFunctions.adaptive_avg_pool1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 1}],
torch._C._VariableFunctions.adaptive_max_pool1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 1}],
torch._C._VariableFunctions.add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._add_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._add_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._add_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._add_relu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._add_relu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.addmv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.addmv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.addmv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.addr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.addr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.affine_grid_generator: [{'is_kwarg_only': 'False', 'name': 'theta', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}],
torch._C._VariableFunctions._is_all_true: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._is_any_true: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._test_check_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._test_functorch_fallback: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.allclose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arange: [{'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.arange: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.arange: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.arange: [{'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.arange: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._dim_arange: [{'is_kwarg_only': 'False', 'name': 'like', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.argmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.argmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.argmin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.argmin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.acosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.acosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.acosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arccosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arccosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arccosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.asinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.asinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.asinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arcsinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arcsinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arcsinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.atanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.atanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.atanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arctanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arctanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arctanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.as_strided: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.as_strided_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.asin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.asin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.asin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arcsin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arcsin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arcsin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.atan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.atan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.atan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arctan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arctan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arctan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.atleast_1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.atleast_1d: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.atleast_2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.atleast_2d: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.atleast_3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.atleast_3d: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.baddbmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.baddbmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bartlett_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.bartlett_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}],
torch._C._VariableFunctions.batch_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'cudnn_enabled', 'simple_type': 'bool'}],
torch._C._VariableFunctions.quantized_batch_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'var', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'output_scale', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'output_zero_point', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._batch_norm_impl_index: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'cudnn_enabled', 'simple_type': 'bool'}],
torch._C._VariableFunctions.bernoulli: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bernoulli: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bernoulli: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}],
torch._C._VariableFunctions.bilinear: [{'is_kwarg_only': 'False', 'name': 'input1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.binary_cross_entropy_with_logits: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bincount: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bitwise_not: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bitwise_not: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.copysign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.copysign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.copysign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.copysign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._lazy_clone: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.logical_not: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.logical_not: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.logical_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.logical_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.logical_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.logical_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.logical_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.logical_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.blackman_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.blackman_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}],
torch._C._VariableFunctions.bmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.broadcast_tensors: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.broadcast_to: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions._sparse_broadcast_to: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.cat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.cat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.cat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.cat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.concat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.concat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.concat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.concat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.concatenate: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.concatenate: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.concatenate: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.concatenate: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.block_diag: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.ceil: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.ceil: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.ceil_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.chain_matmul: [{'is_kwarg_only': 'False', 'name': 'matrices', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.chain_matmul: [{'is_kwarg_only': 'False', 'name': 'matrices', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.unsafe_chunk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'chunks', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.chunk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'chunks', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.tensor_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.tensor_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.tensor_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor_indices_or_sections', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.clamp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.clamp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.clamp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.clamp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.clamp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.clamp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.clip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.clip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.clip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.clip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.clip_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.clip_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cudnn_is_acceptable: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.complex: [{'is_kwarg_only': 'False', 'name': 'real', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'imag', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.complex: [{'is_kwarg_only': 'False', 'name': 'real', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'imag', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.polar: [{'is_kwarg_only': 'False', 'name': 'abs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'angle', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.polar: [{'is_kwarg_only': 'False', 'name': 'abs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'angle', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.constant_pad_nd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pad', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.convolution: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'transposed', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'output_padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions._convolution: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'transposed', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'output_padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'cudnn_enabled', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'allow_tf32', 'simple_type': 'bool'}],
torch._C._VariableFunctions._convolution: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'transposed', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'output_padding', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'cudnn_enabled', 'simple_type': 'bool'}],
torch._C._VariableFunctions._convolution_mode: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'c10::string_view'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.conv1d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.conv1d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.conv2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.conv2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.conv3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.conv3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.conv_tbc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.conv_transpose1d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.conv_transpose2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.conv_transpose3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._copy_from: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dst', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._copy_from_and_resize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dst', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cosine_embedding_loss: [{'is_kwarg_only': 'False', 'name': 'input1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.count_nonzero: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.count_nonzero: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cov: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.corrcoef: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cudnn_affine_grid_generator: [{'is_kwarg_only': 'False', 'name': 'theta', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'N', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'C', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'H', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'W', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.cudnn_batch_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'exponential_average_factor', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'epsilon', 'simple_type': 'double'}],
torch._C._VariableFunctions.cudnn_convolution: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'allow_tf32', 'simple_type': 'bool'}],
torch._C._VariableFunctions.cudnn_convolution: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'allow_tf32', 'simple_type': 'bool'}],
torch._C._VariableFunctions.cudnn_convolution_transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'output_padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'allow_tf32', 'simple_type': 'bool'}],
torch._C._VariableFunctions._mps_convolution_transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'output_padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.cudnn_convolution_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.cudnn_convolution_add_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'z', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'alpha', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.cudnn_grid_sampler: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'grid', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cummax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.cummax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.cummax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.cummax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions._cummax_helper: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.cummin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.cummin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.cummin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.cummin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions._cummin_helper: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.cumprod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.cumprod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.cumprod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.cumprod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.cumsum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.cumsum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.cumsum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.cumsum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.cumulative_trapezoid: [{'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cumulative_trapezoid: [{'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions._ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.diag_embed: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.diagflat: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.diagonal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.diagonal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.diff: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.diff: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'spacing', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'spacing', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'spacing', 'simple_type': 'ScalarList'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'spacing', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'spacing', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}],
torch._C._VariableFunctions.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}],
torch._C._VariableFunctions.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}],
torch._C._VariableFunctions.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}],
torch._C._VariableFunctions.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}],
torch._C._VariableFunctions.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}],
torch._C._VariableFunctions.true_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.true_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.true_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.dot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.dot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.vdot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.vdot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.einsum: [{'is_kwarg_only': 'False', 'name': 'equation', 'simple_type': 'c10::string_view'}, {'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.embedding: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.embedding_renorm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max_norm', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'norm_type', 'simple_type': 'double'}],
torch._C._VariableFunctions._embedding_bag_forward_only: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._rowwise_prune: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'compressed_indices_dtype', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions.row_stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.row_stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.embedding_bag: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.embedding_bag: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_grad_by_freq', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'sparse', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'per_sample_weights', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'include_last_offset', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'padding_idx', 'simple_type': 'int64_t?'}],
torch._C._VariableFunctions._embedding_bag: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.empty: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}],
torch._C._VariableFunctions.empty: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.empty: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.empty_permuted: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'physical_layout', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions._empty_affine_quantized: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions._empty_per_channel_affine_quantized: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'scales', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'zero_points', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'axis', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._resize_output_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'device', 'simple_type': 'Device'}],
torch._C._VariableFunctions.empty_quantized: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'qtensor', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.empty_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.empty_strided: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.erf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.erf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.erf_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.erfc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.erfc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.erfc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.exp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.exp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.exp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.exp2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.expm1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.expm1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.expm1_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.eye: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.eye: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'm', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.eye: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.eye: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'm', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'start_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'end_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'start_dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'end_dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'DimnameList'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.unflatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'sizes', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.unflatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'sizes', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'names', 'simple_type': 'DimnameList'}],
torch._C._VariableFunctions.fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.floor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.floor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.floor_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.floor_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.floor_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.floor_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.frac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.frac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.frac_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.full: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'fill_value', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}],
torch._C._VariableFunctions.full: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'fill_value', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.full: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'fill_value', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.full_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'fill_value', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.from_file: [{'is_kwarg_only': 'False', 'name': 'filename', 'simple_type': 'c10::string_view'}],
torch._C._VariableFunctions.gcd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.gcd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.gcd_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.lcm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.lcm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.lcm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.grid_sampler: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'grid', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'interpolation_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'padding_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}],
torch._C._VariableFunctions.grid_sampler_2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'grid', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'interpolation_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'padding_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}],
torch._C._VariableFunctions._grid_sampler_2d_cpu_fallback: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'grid', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'interpolation_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'padding_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}],
torch._C._VariableFunctions.grid_sampler_3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'grid', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'interpolation_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'padding_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}],
torch._C._VariableFunctions.hann_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.hann_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}],
torch._C._VariableFunctions.hamming_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.hamming_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}],
torch._C._VariableFunctions.hamming_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'alpha', 'simple_type': 'double'}],
torch._C._VariableFunctions.hamming_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'alpha', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'beta', 'simple_type': 'double'}],
torch._C._VariableFunctions.kaiser_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.kaiser_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}],
torch._C._VariableFunctions.kaiser_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'beta', 'simple_type': 'double'}],
torch._C._VariableFunctions.hinge_embedding_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.group_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'num_groups', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.native_group_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'N', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'C', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'HxW', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'group', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}],
torch._C._VariableFunctions._fft_r2c: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'normalization', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'onesided', 'simple_type': 'bool'}],
torch._C._VariableFunctions._fft_r2c: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'normalization', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'onesided', 'simple_type': 'bool'}],
torch._C._VariableFunctions._fft_c2r: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'normalization', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'last_dim_size', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions._fft_c2r: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'normalization', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'last_dim_size', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions._fft_c2c: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'normalization', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'forward', 'simple_type': 'bool'}],
torch._C._VariableFunctions._fft_c2c: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'normalization', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'forward', 'simple_type': 'bool'}],
torch._C._VariableFunctions._validate_compressed_sparse_indices: [{'is_kwarg_only': 'False', 'name': 'is_crow', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'compressed_idx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'plain_idx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cdim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'nnz', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._cufft_get_plan_cache_size: [{'is_kwarg_only': 'False', 'name': 'device_index', 'simple_type': 'DeviceIndex'}],
torch._C._VariableFunctions._cufft_get_plan_cache_max_size: [{'is_kwarg_only': 'False', 'name': 'device_index', 'simple_type': 'DeviceIndex'}],
torch._C._VariableFunctions._cufft_set_plan_cache_max_size: [{'is_kwarg_only': 'False', 'name': 'device_index', 'simple_type': 'DeviceIndex'}, {'is_kwarg_only': 'False', 'name': 'max_size', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._cufft_clear_plan_cache: [{'is_kwarg_only': 'False', 'name': 'device_index', 'simple_type': 'DeviceIndex'}],
torch._C._VariableFunctions._unsafe_index: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<::std::optional<Tensor>>'}],
torch._C._VariableFunctions._unsafe_masked_index: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<::std::optional<Tensor>>'}, {'is_kwarg_only': 'False', 'name': 'fill', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._unsafe_masked_index_put_accumulate: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<::std::optional<Tensor>>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.index_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.index_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.index_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.index_put_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<::std::optional<Tensor>>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.index_put: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<::std::optional<Tensor>>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._unsafe_index_put: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<::std::optional<Tensor>>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._index_put_impl_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<::std::optional<Tensor>>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.instance_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'use_input_stats', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'cudnn_enabled', 'simple_type': 'bool'}],
torch._C._VariableFunctions.isclose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.isin: [{'is_kwarg_only': 'False', 'name': 'elements', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'test_elements', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.isin: [{'is_kwarg_only': 'False', 'name': 'elements', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'test_elements', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.isin: [{'is_kwarg_only': 'False', 'name': 'elements', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'test_element', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.isin: [{'is_kwarg_only': 'False', 'name': 'elements', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'test_element', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.isin: [{'is_kwarg_only': 'False', 'name': 'element', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'test_elements', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.isin: [{'is_kwarg_only': 'False', 'name': 'element', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'test_elements', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.isnan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.is_distributed: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.is_floating_point: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.is_complex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.is_conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._is_zerotensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.is_neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.isreal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.is_nonzero: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.is_same_size: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.is_signed: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.is_inference: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.kl_div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.kron: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.kron: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.kthvalue: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.kthvalue: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.kthvalue: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.kthvalue: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.layer_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'normalized_shape', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.native_layer_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'normalized_shape', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}],
torch._C._VariableFunctions.rms_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'normalized_shape', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.nan_to_num: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.nan_to_num: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.nan_to_num_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.mkldnn_linear_backward_weights: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias_defined', 'simple_type': 'bool'}],
torch._C._VariableFunctions._cslt_compress: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._cslt_sparse_mm: [{'is_kwarg_only': 'False', 'name': 'compressed_A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dense_B', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._cslt_sparse_mm_search: [{'is_kwarg_only': 'False', 'name': 'compressed_A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dense_B', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._sparse_semi_structured_tile: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._sparse_semi_structured_apply: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'thread_masks', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._sparse_semi_structured_apply_dense: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'thread_masks', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._sparse_semi_structured_linear: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'meta', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._sparse_semi_structured_mm: [{'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1_meta', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._sparse_semi_structured_addmm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1_meta', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._mixed_dtypes_linear: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.fbgemm_linear_int8_weight_fp32_activation: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight_scale', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'weight_zero_point', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.fbgemm_linear_int8_weight: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight_scale', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'weight_zero_point', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.fbgemm_linear_quantize_weight: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.fbgemm_pack_gemm_matrix_fp16: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._wrapped_linear_prepack: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight_scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight_zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._wrapped_quantized_linear_prepacked: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'out_channel', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.fbgemm_linear_fp16_weight_fp32_activation: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.fbgemm_linear_fp16_weight: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.fbgemm_pack_quantized_matrix: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.fbgemm_pack_quantized_matrix: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'K', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'N', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.ldexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.ldexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.ldexp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.log: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.log: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.log_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.log10: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.log10: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.log10_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.log1p: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.log1p: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.log1p_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.log2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.log2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.log2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.logaddexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.logaddexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.logaddexp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.logaddexp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.xlogy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.xlogy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions._log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'half_to_float', 'simple_type': 'bool'}],
torch._C._VariableFunctions._log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'half_to_float', 'simple_type': 'bool'}],
torch._C._VariableFunctions._log_softmax_backward_data: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'input_dtype', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions._log_softmax_backward_data: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'input_dtype', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions._logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}],
torch._C._VariableFunctions.logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}],
torch._C._VariableFunctions.logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.margin_ranking_loss: [{'is_kwarg_only': 'False', 'name': 'input1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.matmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.matmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.matrix_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.matrix_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.matrix_exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._aminmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._aminmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.aminmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.aminmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._compute_linear_combination: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'coefficients', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._compute_linear_combination: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'coefficients', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.amax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.amax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.max_pool1d_with_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 1}],
torch._C._VariableFunctions.max_pool1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 1}],
torch._C._VariableFunctions.max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}],
torch._C._VariableFunctions.mkldnn_max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}],
torch._C._VariableFunctions.mkldnn_max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}],
torch._C._VariableFunctions.quantized_max_pool1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 1}],
torch._C._VariableFunctions.quantized_max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}],
torch._C._VariableFunctions.quantized_max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}],
torch._C._VariableFunctions.max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}],
torch._C._VariableFunctions.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}],
torch._C._VariableFunctions.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}],
torch._C._VariableFunctions.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.nanmean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.nanmean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.amin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.amin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._mps_convolution: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.mkldnn_convolution: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.mkldnn_rnn_layer: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight0', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight3', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx_', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cx_', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reverse', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'hidden_size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}],
torch._C._VariableFunctions.miopen_batch_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'exponential_average_factor', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'epsilon', 'simple_type': 'double'}],
torch._C._VariableFunctions.miopen_convolution: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}],
torch._C._VariableFunctions.miopen_convolution_transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'output_padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}],
torch._C._VariableFunctions.miopen_depthwise_convolution: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}],
torch._C._VariableFunctions.miopen_convolution_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.miopen_convolution_add_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'z', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'alpha', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.miopen_rnn: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weight_stride0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cx', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'hidden_size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dropout_state', 'simple_type': 'Tensor?'}],
torch._C._VariableFunctions.mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._int_mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._int_mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._convert_weight_to_int4pack: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'innerKTiles', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._weight_int4pack_mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'qGroupSize', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'qScaleAndZeros', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._convert_weight_to_int4pack_for_cpu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'innerKTiles', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._weight_int4pack_mm_for_cpu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'qGroupSize', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'qScaleAndZeros', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._dyn_quant_pack_4bit_weight: [{'is_kwarg_only': 'False', 'name': 'weights', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scales_zeros', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'block_size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'in_features', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'out_features', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._dyn_quant_matmul_4bit: [{'is_kwarg_only': 'False', 'name': 'inp', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_weights', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'block_size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'in_features', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'out_features', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._weight_int8pack_mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scales', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._sparse_sparse_matmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.mode: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.mode: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.mode: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.mode: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.multiply: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.multiply: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.multiply: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.mv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.mv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.mvlgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.mvlgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.narrow_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.narrow_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.narrow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.narrow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.native_batch_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}],
torch._C._VariableFunctions.native_batch_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}],
torch._C._VariableFunctions._native_batch_norm_legit: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}],
torch._C._VariableFunctions._native_batch_norm_legit: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}],
torch._C._VariableFunctions._native_batch_norm_legit: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}],
torch._C._VariableFunctions._native_batch_norm_legit: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}],
torch._C._VariableFunctions._native_batch_norm_legit_no_training: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}],
torch._C._VariableFunctions.batch_norm_stats: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}],
torch._C._VariableFunctions.batch_norm_elemt: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'invstd', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}],
torch._C._VariableFunctions.batch_norm_elemt: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'invstd', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}],
torch._C._VariableFunctions.batch_norm_gather_stats: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'invstd', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'count', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.batch_norm_gather_stats_with_counts: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'invstd', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'counts', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.batch_norm_backward_reduce: [{'is_kwarg_only': 'False', 'name': 'grad_out', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'invstd', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'input_g', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'weight_g', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bias_g', 'simple_type': 'bool'}],
torch._C._VariableFunctions.batch_norm_backward_elemt: [{'is_kwarg_only': 'False', 'name': 'grad_out', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'invstd', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'sum_dy', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sum_dy_xmu', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'count', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.batch_norm_update_stats: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}],
torch._C._VariableFunctions.is_vulkan_available: [],
torch._C._VariableFunctions._nnpack_available: [],
torch._C._VariableFunctions._nnpack_spatial_convolution: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._VariableFunctions.ones: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}],
torch._C._VariableFunctions.ones: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.ones: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.ones_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.pairwise_distance: [{'is_kwarg_only': 'False', 'name': 'x1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cdist: [{'is_kwarg_only': 'False', 'name': 'x1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._euclidean_dist: [{'is_kwarg_only': 'False', 'name': 'x1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.pdist: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cosine_similarity: [{'is_kwarg_only': 'False', 'name': 'x1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.permute: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.movedim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.movedim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.moveaxis: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.moveaxis: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.adjoint: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.pixel_shuffle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'upscale_factor', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.pixel_unshuffle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'downscale_factor', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.channel_shuffle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.native_channel_shuffle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions._pin_memory: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.pinverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.poisson_nll_loss: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'log_input', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'full', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'reduction', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.rad2deg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.rad2deg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.rad2deg_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.deg2rad: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.deg2rad: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.deg2rad_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.scalar_tensor: [{'is_kwarg_only': 'False', 'name': 's', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.rand: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}],
torch._C._VariableFunctions.rand: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}],
torch._C._VariableFunctions.rand: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.rand: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}],
torch._C._VariableFunctions.rand: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.rand: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}],
torch._C._VariableFunctions.rand_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}],
torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'low', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'low', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}],
torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}],
torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'low', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'low', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}],
torch._C._VariableFunctions.randint_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.randint_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'low', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.randn: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.randn: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}],
torch._C._VariableFunctions.randn: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}],
torch._C._VariableFunctions.randn: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}],
torch._C._VariableFunctions.randn: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.randn: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}],
torch._C._VariableFunctions.randn_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.randperm: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.randperm: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}],
torch._C._VariableFunctions.randperm: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.randperm: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}],
torch._C._VariableFunctions.ravel: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.reciprocal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.reciprocal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.reciprocal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.neg_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.negative: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.negative: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.negative_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.repeat_interleave: [{'is_kwarg_only': 'False', 'name': 'repeats', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.repeat_interleave: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'repeats', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.repeat_interleave: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'repeats', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.reshape: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'shape', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions._mkldnn_reshape: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'shape', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.round_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.round_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.rrelu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.rrelu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.relu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.prelu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._prelu_kernel: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.hardshrink: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.hardshrink: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.rsqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.rsqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.rsqrt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.selu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.selu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.celu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.celu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sigmoid_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.logit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.logit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.logit_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sinc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sinc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sinc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.detach: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.detach_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.slice_inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.slice_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.slice_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.select_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.diagonal_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.as_strided_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.smm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions._softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'half_to_float', 'simple_type': 'bool'}],
torch._C._VariableFunctions._softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'half_to_float', 'simple_type': 'bool'}],
torch._C._VariableFunctions._softmax_backward_data: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'input_dtype', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions._softmax_backward_data: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'input_dtype', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions.unsafe_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.unsafe_split_with_sizes: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_sizes', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.split_with_sizes: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_sizes', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.hsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.hsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.vsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.vsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.dsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.dsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.sspaddmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sspaddmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._chunk_cat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_chunks', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._chunk_cat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_chunks', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.hstack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.hstack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.vstack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.vstack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.dstack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.dstack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
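# A minimal sketch of how the stacking entries above are used; the shapes noted
# in the comments assume four freshly created 2x3 tensors:
#   ts = [torch.randn(2, 3) for _ in range(4)]
#   torch.stack(ts)    # shape (4, 2, 3): adds a new leading dimension
#   torch.hstack(ts)   # shape (2, 12):  concatenates along dim 1 for 2-D inputs
#   torch.vstack(ts)   # shape (8, 3):   concatenates along dim 0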
torch._C._VariableFunctions.stft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n_fft', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.stft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n_fft', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.istft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n_fft', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}],
torch._C._VariableFunctions.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}],
torch._C._VariableFunctions.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.nansum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.nansum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sqrt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.square: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.square: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.square_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}],
torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}],
torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.std_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.std_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}],
torch._C._VariableFunctions.std_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.std_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.std_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.t: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.tan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.tan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.tan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.tanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.tanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.tanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.tensordot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims_self', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dims_other', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.tensordot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims_self', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dims_other', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.threshold: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'threshold', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.threshold: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'threshold', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.threshold_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'threshold', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.tile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions._mkldnn_transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._mkldnn_transpose_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.flip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.fliplr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.flipud: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.roll: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'shifts', 'simple_type': 'SymIntArrayRef', 'size': 1}],
torch._C._VariableFunctions.rot90: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.trapezoid: [{'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.trapezoid: [{'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.trapz: [{'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.trapz: [{'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._transform_bias_rescale_qkv: [{'is_kwarg_only': 'False', 'name': 'qkv', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'qkv_bias', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'num_heads', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._nested_tensor_from_mask: [{'is_kwarg_only': 'False', 'name': 't', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_tensor_from_mask_left_aligned: [{'is_kwarg_only': 'False', 'name': 't', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_from_padded: [{'is_kwarg_only': 'False', 'name': 'padded', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cpu_nested_shape_example', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_from_padded_and_nested_example: [{'is_kwarg_only': 'False', 'name': 'padded', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nt_example', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_view_from_buffer: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nested_size', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nested_strides', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_view_from_buffer_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nested_size', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nested_strides', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_view_from_buffer_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nested_size', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nested_strides', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_view_from_jagged: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dummy', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_view_from_jagged_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dummy', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_view_from_jagged_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dummy', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_get_values: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_get_values_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_get_values_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_get_offsets: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_get_lengths: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_get_ragged_idx: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_get_min_seqlen: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_get_max_seqlen: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_get_jagged_dummy: [{'is_kwarg_only': 'False', 'name': 'any', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_compute_contiguous_strides_offsets: [{'is_kwarg_only': 'False', 'name': 'nested_size', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._trilinear: [{'is_kwarg_only': 'False', 'name': 'i1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'i2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'i3', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'expand1', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'expand2', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'expand3', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'sumdim', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.triplet_margin_loss: [{'is_kwarg_only': 'False', 'name': 'anchor', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'positive', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'negative', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.trunc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.trunc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.trunc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.fix: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.fix: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.fix_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._has_compatible_shallow_copy_type: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'from', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._unique: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.unique_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.unique_consecutive: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._unique2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.unsqueeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.vander: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}],
torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}],
torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.var_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.var_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}],
torch._C._VariableFunctions.var_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.var_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.var_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}],
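# The torch.where entries above cover the Tensor/Scalar overload combinations
# plus the single-argument form, which behaves like
# torch.nonzero(condition, as_tuple=True). A hedged usage sketch:
#   cond = torch.randn(3) > 0
#   torch.where(cond, torch.ones(3), torch.zeros(3))  # elementwise select
#   torch.where(cond)                                 # tuple of index tensors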
torch._C._VariableFunctions.norm_except_dim: [{'is_kwarg_only': 'False', 'name': 'v', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._weight_norm: [{'is_kwarg_only': 'False', 'name': 'v', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'g', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._weight_norm_interface: [{'is_kwarg_only': 'False', 'name': 'v', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'g', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.zeros: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}],
torch._C._VariableFunctions.zeros: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.zeros: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions._efficientzerotensor: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.zeros_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._standard_gamma_grad: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._standard_gamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._dirichlet_grad: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'alpha', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'total', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._sample_dirichlet: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.poisson: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.binomial: [{'is_kwarg_only': 'False', 'name': 'count', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'prob', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.native_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.native_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType?'}],
torch._C._VariableFunctions._sparse_sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._sparse_sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions._sparse_sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}],
torch._C._VariableFunctions._sparse_sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions._sparse_csr_sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}],
torch._C._VariableFunctions._sparse_csr_prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}],
torch._C._VariableFunctions._sparse_softmax_backward_data: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._sparse_log_softmax_backward_data: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}],
torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}],
torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch._C._VariableFunctions.frexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.frexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.frobenius_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}],
torch._C._VariableFunctions.frobenius_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}],
torch._C._VariableFunctions.nuclear_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.nuclear_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.nuclear_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 2}],
torch._C._VariableFunctions.nuclear_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 2}],
torch._C._VariableFunctions.clone: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.positive: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.resize_as_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'the_template', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.resize_as_sparse_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'the_template', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.zero_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.subtract: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.subtract: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.subtract: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.rsub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.rsub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.heaviside: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.heaviside: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.addmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.addmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._addmm_activation: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._addmm_activation: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._scaled_mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_a', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_b', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._scaled_mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_a', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_b', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._scaled_grouped_mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_a', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_b', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._validate_sparse_coo_tensor_args: [{'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions._validate_sparse_compressed_tensor_args: [{'is_kwarg_only': 'False', 'name': 'compressed_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'plain_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'layout', 'simple_type': 'Layout'}],
torch._C._VariableFunctions._validate_sparse_csr_tensor_args: [{'is_kwarg_only': 'False', 'name': 'crow_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions._validate_sparse_csc_tensor_args: [{'is_kwarg_only': 'False', 'name': 'ccol_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'row_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions._validate_sparse_bsr_tensor_args: [{'is_kwarg_only': 'False', 'name': 'crow_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions._validate_sparse_bsc_tensor_args: [{'is_kwarg_only': 'False', 'name': 'ccol_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'row_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions._to_cpu: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._coalesce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.hspmm: [{'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.hspmm: [{'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.unbind: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.unbind: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions._to_sparse_semi_structured: [{'is_kwarg_only': 'False', 'name': 'dense', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.quantize_per_tensor_dynamic: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}, {'is_kwarg_only': 'False', 'name': 'reduce_range', 'simple_type': 'bool'}],
torch._C._VariableFunctions.quantize_per_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions.quantize_per_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions.quantize_per_tensor: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scales', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_points', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions.quantize_per_channel: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scales', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_points', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions.dequantize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.dequantize: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.q_scale: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.q_zero_point: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.q_per_channel_scales: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.q_per_channel_zero_points: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.q_per_channel_axis: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.int_repr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
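# Hedged example tying the quantization entries above together; the scale and
# zero_point values are arbitrary placeholders:
#   q = torch.quantize_per_tensor(torch.randn(4), 0.1, 0, torch.quint8)
#   torch.dequantize(q)   # back to a float tensor
#   torch.int_repr(q)     # underlying uint8 storage
#   torch.q_scale(q)      # 0.1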
torch._C._VariableFunctions._make_per_tensor_quantized_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._make_per_channel_quantized_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.fake_quantize_per_tensor_affine: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.fake_quantize_per_tensor_affine: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._fake_quantize_per_tensor_affine_cachemask_tensor_qparams: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'fake_quant_enabled', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._fake_quantize_learnable_per_tensor_affine: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.fake_quantize_per_channel_affine: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._fake_quantize_learnable_per_channel_affine: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.fused_moving_avg_obs_fake_quant: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'observer_on', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'fake_quant_on', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_min', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_max', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'averaging_const', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'ch_axis', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._fused_moving_avg_obs_fq_helper: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'observer_on', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'fake_quant_on', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_min', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_max', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'averaging_const', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'ch_axis', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._choose_qparams_per_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._saturate_weight_to_fp16: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.choose_qparams_optimized: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'numel', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'n_bins', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'ratio', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'bit_width', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.meshgrid: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.meshgrid: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'indexing', 'simple_type': 'c10::string_view'}],
torch._C._VariableFunctions.cartesian_prod: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.combinations: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.result_type: [{'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.result_type: [{'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.result_type: [{'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.result_type: [{'is_kwarg_only': 'False', 'name': 'scalar1', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'scalar2', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.can_cast: [{'is_kwarg_only': 'False', 'name': 'from_', 'simple_type': 'ScalarType'}, {'is_kwarg_only': 'False', 'name': 'to', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions.promote_types: [{'is_kwarg_only': 'False', 'name': 'type1', 'simple_type': 'ScalarType'}, {'is_kwarg_only': 'False', 'name': 'type2', 'simple_type': 'ScalarType'}],
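# result_type / can_cast / promote_types operate on dtypes and scalars rather
# than tensor data; two examples consistent with the entries above:
#   torch.result_type(torch.tensor([1, 2], dtype=torch.int32), 1.0)  # torch.float32
#   torch.promote_types(torch.int32, torch.float32)                  # torch.float32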
torch._C._VariableFunctions._lstm_mps: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}],
torch._C._VariableFunctions.lstm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}],
torch._C._VariableFunctions.lstm: [{'is_kwarg_only': 'False', 'name': 'data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}],
torch._C._VariableFunctions.gru: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}],
torch._C._VariableFunctions.gru: [{'is_kwarg_only': 'False', 'name': 'data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}],
torch._C._VariableFunctions.rnn_tanh: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}],
torch._C._VariableFunctions.rnn_tanh: [{'is_kwarg_only': 'False', 'name': 'data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}],
torch._C._VariableFunctions.rnn_relu: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}],
torch._C._VariableFunctions.rnn_relu: [{'is_kwarg_only': 'False', 'name': 'data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}],
torch._C._VariableFunctions.lstm_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.gru_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.rnn_tanh_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.rnn_relu_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.quantized_lstm_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'scale_hh', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_hh', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.quantized_gru_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'scale_hh', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_hh', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.quantized_rnn_relu_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'scale_hh', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_hh', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.quantized_rnn_tanh_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'scale_hh', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_hh', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._pack_padded_sequence: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}],
torch._C._VariableFunctions._pad_packed_sequence: [{'is_kwarg_only': 'False', 'name': 'data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'padding_value', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'total_length', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.masked_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.masked_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.masked_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._masked_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.put: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.index_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.index_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.index_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.index_reduce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}],
torch._C._VariableFunctions.index_reduce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}],
torch._C._VariableFunctions.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'reduce', 'simple_type': 'c10::string_view'}],
torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'reduce', 'simple_type': 'c10::string_view'}],
torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'reduce', 'simple_type': 'c10::string_view'}],
torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'reduce', 'simple_type': 'c10::string_view'}],
torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.scatter_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.scatter_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.scatter_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.scatter_reduce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}],
torch._C._VariableFunctions.scatter_reduce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}],
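# NOTE: entries with 'is_kwarg_only': 'True' describe keyword-only parameters. The `scatter`
# overloads above that take a `reduce` string, for example, correspond to calls shaped like
#     torch.scatter(x, 0, index, src, reduce='add')
# (illustrative only; 'add' is one of the accepted reduction strings for this overload).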
torch._C._VariableFunctions.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.__and__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.__and__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.__or__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.__or__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.__xor__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.__xor__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.__lshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.__lshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.__rshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.__rshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.addbmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.addbmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.diag: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.diag: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cross: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cross: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.triu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.triu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.tril: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.tril: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.tril_indices: [{'is_kwarg_only': 'False', 'name': 'row', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'col', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.triu_indices: [{'is_kwarg_only': 'False', 'name': 'row', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'col', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.trace: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.ne: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.ne: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.ne: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.ne: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.not_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.not_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.not_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.not_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.eq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.eq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.eq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.eq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.ge: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.ge: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.ge: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.ge: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.greater_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.greater_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.greater_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.greater_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.le: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.le: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.le: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.le: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.less_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.less_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.less_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.less_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.gt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.gt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.gt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.gt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.greater: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.greater: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.greater: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.greater: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.lt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.lt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.lt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.lt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.less: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.less: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.less: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.less: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.take: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.take: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.take_along_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.take_along_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.index_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.index_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.index_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.index_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.masked_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.masked_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.nonzero_static: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'size', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.nonzero_static: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'size', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.argwhere: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.gather: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.gather: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.gather: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.gather: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.addcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.addcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.addcdiv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.addcdiv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.triangular_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.triangular_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._linalg_check_errors: [{'is_kwarg_only': 'False', 'name': 'info', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'api_name', 'simple_type': 'c10::string_view'}, {'is_kwarg_only': 'True', 'name': 'is_matrix', 'simple_type': 'bool'}],
torch._C._VariableFunctions.svd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.svd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.swapaxes: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'axis1', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.swapdims: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.cholesky: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cholesky: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cholesky_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cholesky_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cholesky_inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.cholesky_inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.qr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.qr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.geqrf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.geqrf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.orgqr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.orgqr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.ormqr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input3', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.ormqr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input3', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._lu_with_info: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.lu_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_pivots', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.lu_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_pivots', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.lu_unpack: [{'is_kwarg_only': 'False', 'name': 'LU_data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_pivots', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.lu_unpack: [{'is_kwarg_only': 'False', 'name': 'LU_data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_pivots', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.multinomial: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'num_samples', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.multinomial: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'num_samples', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.lgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.lgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.digamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.digamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.polygamma: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.polygamma: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.erfinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.erfinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.i0_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.signbit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.signbit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.dist: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.atan2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.atan2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arctan2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.arctan2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.histc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.histc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.histogram: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.histogram: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.histogram: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.histogram: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._histogramdd_bin_edges: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions._histogramdd_from_bin_cts: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions._histogramdd_from_bin_tensors: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.histogramdd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.histogramdd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.histogramdd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.fmod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.fmod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.fmod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.fmod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.hypot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.hypot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.igamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.igamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.igammac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.igammac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.nextafter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.nextafter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.fmin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.fmin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.fmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.fmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.maximum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.maximum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.minimum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.minimum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.quantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.quantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.quantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'double'}],
torch._C._VariableFunctions.quantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'double'}],
torch._C._VariableFunctions.nanquantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.nanquantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.nanquantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'double'}],
torch._C._VariableFunctions.nanquantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'double'}],
torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool?'}],
torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool?'}],
torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool?'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool?'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'Dimname'}],
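# NOTE: a trailing '?' in simple_type (e.g. 'bool?' for the keyword-only `stable` argument of
# the `sort` overloads above) appears to mark an optional argument; a matching call would
# look like
#     values, indices = torch.sort(x, stable=True)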
torch._C._VariableFunctions.msort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.msort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool'}],
torch._C._VariableFunctions.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool'}],
torch._C._VariableFunctions.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch._C._VariableFunctions.topk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.topk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.renorm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'maxnorm', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.renorm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'maxnorm', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'std', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'std', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'std', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'std', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'std', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'std', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions._amp_foreach_non_finite_check_and_unscale_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'found_inf', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'inv_scale', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._amp_update_scale_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'growth_tracker', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'found_inf', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_growth_factor', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'scale_backoff_factor', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'growth_interval', 'simple_type': 'int64_t'}],
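# The `_foreach_*` entries below are the fused per-tensor-list ops; a trailing underscore marks the
# in-place variant, and the repeated keys list the Scalar / TensorList / ScalarList / Tensor overloads.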
torch._C._VariableFunctions._foreach_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._foreach_add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._foreach_sub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_sub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_sub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_sub_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_sub_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_sub_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._foreach_mul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_mul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_mul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_mul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._foreach_div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._foreach_div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._foreach_clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_maximum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_maximum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_maximum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_maximum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_maximum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_maximum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_minimum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_minimum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_minimum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_minimum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_minimum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_minimum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_addcdiv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_addcdiv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_addcdiv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._foreach_addcdiv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_addcdiv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_addcdiv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._foreach_addcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_addcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_addcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._foreach_addcmul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_addcmul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_addcmul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._foreach_abs: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_abs_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_acos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_acos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_asin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_asin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_atan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_atan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_ceil: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_ceil_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_cos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_cos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_cosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_cosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_erf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_erf_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_erfc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_erfc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_exp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_expm1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_expm1_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_floor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_floor_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_frac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_frac_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensors1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weights', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensors1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensors1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_lerp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensors1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weights', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_lerp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensors1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_lerp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensors1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_lgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_lgamma_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_log: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_log_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_log10: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_log10_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_log1p: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_log1p_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_log2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_log2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_neg_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_pow_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_pow_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._foreach_pow_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'ScalarList'}],
torch._C._VariableFunctions._foreach_reciprocal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_reciprocal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_round_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_rsqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_rsqrt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_sigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_sigmoid_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_sign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_sign_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_sin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_sin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_sinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_sinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_sqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_sqrt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_tan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_tan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_tanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_tanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_trunc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_trunc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_zero_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions._foreach_copy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.bucketize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'boundaries', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bucketize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'boundaries', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.bucketize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'boundaries', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.searchsorted: [{'is_kwarg_only': 'False', 'name': 'sorted_sequence', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.searchsorted: [{'is_kwarg_only': 'False', 'name': 'sorted_sequence', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.searchsorted: [{'is_kwarg_only': 'False', 'name': 'sorted_sequence', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions.searchsorted: [{'is_kwarg_only': 'False', 'name': 'sorted_sequence', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}],
torch._C._VariableFunctions._convert_indices_from_coo_to_csr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._convert_indices_from_coo_to_csr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._convert_indices_from_csr_to_coo: [{'is_kwarg_only': 'False', 'name': 'crow_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_indices', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._convert_indices_from_csr_to_coo: [{'is_kwarg_only': 'False', 'name': 'crow_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_indices', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.mkldnn_adaptive_avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 2}],
torch._C._VariableFunctions.mkldnn_adaptive_avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 2}],
torch._C._VariableFunctions._adaptive_avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._VariableFunctions._adaptive_avg_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}],
torch._C._VariableFunctions.column_stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.column_stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._VariableFunctions.isfinite: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.isinf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.isposinf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.isposinf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.isneginf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.isneginf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._add_batch_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._remove_batch_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'batch_size', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._linalg_det: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._linalg_det: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.det: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._linalg_slogdet: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._linalg_slogdet: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.slogdet: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.slogdet: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.logdet: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._linalg_eigh: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._linalg_eigh: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.inner: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.inner: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.outer: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.outer: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.ger: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.ger: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._linalg_svd: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._linalg_svd: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._linalg_solve_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._linalg_solve_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._test_serialization_subcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._test_parallel_materialize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'num_parallel', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._test_autograd_multiple_dispatch: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._test_autograd_multiple_dispatch: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b', 'simple_type': 'bool'}],
torch._C._VariableFunctions._test_autograd_multiple_dispatch_view: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._test_autograd_multiple_dispatch_view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._test_autograd_multiple_dispatch_view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.segment_reduce: [{'is_kwarg_only': 'False', 'name': 'data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}],
torch._C._VariableFunctions._nested_tensor_from_tensor_list: [{'is_kwarg_only': 'False', 'name': 'list', 'simple_type': 'TensorList'}],
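# The `*_copy` entries below are the non-aliasing (copying) counterparts of the corresponding view ops.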
torch._C._VariableFunctions._fw_primal_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._fw_primal_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._make_dual_copy: [{'is_kwarg_only': 'False', 'name': 'primal', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tangent', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._make_dual_copy: [{'is_kwarg_only': 'False', 'name': 'primal', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tangent', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.view_as_real_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.view_as_real_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.view_as_complex_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.view_as_complex_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._conj_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._conj_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._neg_view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._neg_view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.as_strided_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.as_strided_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions._sparse_broadcast_to_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions._sparse_broadcast_to_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.diagonal_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.diagonal_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.expand_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.expand_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.permute_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.permute_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions._reshape_alias_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions._reshape_alias_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.select_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.select_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.detach_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.detach_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.slice_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.slice_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.split_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.split_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymInt'}],
torch._C._VariableFunctions.split_with_sizes_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_sizes', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.split_with_sizes_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_sizes', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.squeeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.squeeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.squeeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.squeeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.squeeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.squeeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}],
torch._C._VariableFunctions.t_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.t_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.transpose_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.transpose_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.unsqueeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.unsqueeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._values_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._values_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.values_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.values_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.crow_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.crow_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.col_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.col_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.ccol_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.ccol_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.row_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.row_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.unbind_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.unbind_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions.view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch._C._VariableFunctions.view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}],
torch._C._VariableFunctions.unfold_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'step', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.unfold_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'step', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions.alias_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions.alias_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_from_padded_tensor: [{'is_kwarg_only': 'False', 'name': 'padded', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dummy', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._nested_tensor_softmax_with_shape: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._safe_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
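# Transformer / scaled-dot-product-attention entry points, including the backend-specific overloads.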
torch._C._VariableFunctions._transformer_encoder_layer_fwd: [{'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'embed_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_heads', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'qkv_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'qkv_bias', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'proj_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'proj_bias', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'use_gelu', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'norm_first', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'norm_weight_1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'norm_bias_1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'norm_weight_2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'norm_bias_2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ffn_weight_1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ffn_bias_1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ffn_weight_2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ffn_bias_2', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._native_multi_head_attention: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'embed_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_head', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'qkv_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'qkv_bias', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'proj_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'proj_bias', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._fused_sdp_choice: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._scaled_dot_product_attention_math: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._scaled_dot_product_attention_math_for_mps: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._scaled_dot_product_flash_attention: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._scaled_dot_product_flash_attention_for_cpu: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._scaled_dot_product_efficient_attention: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'attn_bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'compute_log_sumexp', 'simple_type': 'bool'}],
torch._C._VariableFunctions._scaled_dot_product_cudnn_attention: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'attn_bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'compute_log_sumexp', 'simple_type': 'bool'}],
torch._C._VariableFunctions._triton_scaled_dot_attention: [{'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'v', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._fill_mem_eff_dropout_mask_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dropout_p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'seed', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'offset', 'simple_type': 'int64_t'}],
torch._C._VariableFunctions._triton_multi_head_attention: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'embed_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_head', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'qkv_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'qkv_bias', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'proj_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'proj_bias', 'simple_type': 'Tensor'}],
torch._C._VariableFunctions._foobar: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
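# Fused optimizer step kernels; each is listed twice, once with a double `lr` and once with a Tensor `lr`.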
torch._C._VariableFunctions._fused_adam_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'grads', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avgs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'max_exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'state_steps', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'lr', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'beta1', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'beta2', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'weight_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'amsgrad', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'maximize', 'simple_type': 'bool'}],
torch._C._VariableFunctions._fused_adam_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'grads', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avgs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'max_exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'state_steps', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'lr', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'beta1', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'beta2', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'weight_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'amsgrad', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'maximize', 'simple_type': 'bool'}],
torch._C._VariableFunctions._fused_adamw_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'grads', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avgs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'max_exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'state_steps', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'lr', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'beta1', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'beta2', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'weight_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'amsgrad', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'maximize', 'simple_type': 'bool'}],
torch._C._VariableFunctions._fused_adamw_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'grads', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avgs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'max_exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'state_steps', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'lr', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'beta1', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'beta2', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'weight_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'amsgrad', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'maximize', 'simple_type': 'bool'}],
torch._C._VariableFunctions._fused_sgd_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'grads', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'momentum_buffer_list', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'weight_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'lr', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'dampening', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'nesterov', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'maximize', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'is_first_step', 'simple_type': 'bool'}],
torch._C._VariableFunctions._fused_sgd_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'grads', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'momentum_buffer_list', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'weight_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'lr', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'dampening', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'nesterov', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'maximize', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'is_first_step', 'simple_type': 'bool'}],
torch._C._VariableFunctions._fused_adagrad_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'grads', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'state_sums', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'state_steps', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'lr', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'lr_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'weight_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'maximize', 'simple_type': 'bool'}],
torch._C._VariableFunctions._propagate_xla_data: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}],
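# The entries that follow cover functions exposed under torch._C._nn. As with the
# torch._C._VariableFunctions entries above, each record lists one overload's
# parameters in order: the parameter name, whether it is keyword-only, its
# simple_type, and a fixed length ('size') where the array-like type records one.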
torch._C._nn.binary_cross_entropy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.binary_cross_entropy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.linear: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch._C._nn.linear: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch._C._nn.mkldnn_linear: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch._C._nn.relu6: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.relu6_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.gelu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.gelu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.gelu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.silu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.silu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.silu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.mish: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.mish: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.mish_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.one_hot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.mkldnn_reorder_conv2d_weight: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.mkldnn_reorder_conv3d_weight: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.cross_entropy_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.mse_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.mse_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.l1_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.multi_margin_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.multi_margin_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.multilabel_margin_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.multilabel_margin_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.nll_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.nll_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.nll_loss_nd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.nll_loss2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.nll_loss2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.smooth_l1_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.smooth_l1_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.huber_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.huber_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.soft_margin_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.soft_margin_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}],
torch._C._nn.elu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.elu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.elu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.glu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.glu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.hardsigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.hardsigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.hardsigmoid_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.hardtanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.hardtanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.hardtanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.hardswish: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.hardswish: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.hardswish_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.leaky_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.leaky_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.leaky_relu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.log_sigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.log_sigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.rrelu_with_noise: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'noise', 'simple_type': 'Tensor'}],
torch._C._nn.rrelu_with_noise: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'noise', 'simple_type': 'Tensor'}],
torch._C._nn.rrelu_with_noise_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'noise', 'simple_type': 'Tensor'}],
torch._C._nn.softplus: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.softplus: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.softshrink: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.softshrink: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.adaptive_avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn.adaptive_avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn.adaptive_avg_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}],
torch._C._nn.adaptive_avg_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}],
torch._C._nn.adaptive_max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 2}],
torch._C._nn.adaptive_max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 2}],
torch._C._nn.adaptive_max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 3}],
torch._C._nn.adaptive_max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 3}],
torch._C._nn.avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}],
torch._C._nn.avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}],
torch._C._nn.avg_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}],
torch._C._nn.avg_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}],
torch._C._nn.fractional_max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'random_samples', 'simple_type': 'Tensor'}],
torch._C._nn.fractional_max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'random_samples', 'simple_type': 'Tensor'}],
torch._C._nn.fractional_max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'random_samples', 'simple_type': 'Tensor'}],
torch._C._nn.fractional_max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'random_samples', 'simple_type': 'Tensor'}],
torch._C._nn.max_pool2d_with_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}],
torch._C._nn.max_pool2d_with_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}],
torch._C._nn.max_pool3d_with_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}],
torch._C._nn.max_pool3d_with_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}],
torch._C._nn.max_unpool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn.max_unpool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn.max_unpool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'IntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'IntArrayRef', 'size': 3}],
torch._C._nn.max_unpool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'IntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'IntArrayRef', 'size': 3}],
torch._C._nn.reflection_pad1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn.reflection_pad1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn.reflection_pad2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 4}],
torch._C._nn.reflection_pad2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 4}],
torch._C._nn.reflection_pad3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 6}],
torch._C._nn.reflection_pad3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 6}],
torch._C._nn.replication_pad1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn.replication_pad1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn.replication_pad2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 4}],
torch._C._nn.replication_pad2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 4}],
torch._C._nn.replication_pad3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 6}],
torch._C._nn.replication_pad3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 6}],
torch._C._nn._pad_circular: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pad', 'simple_type': 'SymIntArrayRef'}],
torch._C._nn._pad_enum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pad', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'mode', 'simple_type': 'int64_t'}],
torch._C._nn.pad: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pad', 'simple_type': 'SymIntArrayRef'}],
torch._C._nn.upsample_linear1d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef<double>?'}],
torch._C._nn.upsample_linear1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}],
torch._C._nn.upsample_linear1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}],
torch._C._nn.upsample_bilinear2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef<double>?'}],
torch._C._nn.upsample_bilinear2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}],
torch._C._nn.upsample_bilinear2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}],
torch._C._nn._upsample_bilinear2d_aa: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef<double>?'}],
torch._C._nn._upsample_bilinear2d_aa: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}],
torch._C._nn._upsample_bilinear2d_aa: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}],
torch._C._nn.upsample_trilinear3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef<double>?'}],
torch._C._nn.upsample_trilinear3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}],
torch._C._nn.upsample_trilinear3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}],
torch._C._nn.upsample_bicubic2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef<double>?'}],
torch._C._nn.upsample_bicubic2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}],
torch._C._nn.upsample_bicubic2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}],
torch._C._nn._upsample_bicubic2d_aa: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef<double>?'}],
torch._C._nn._upsample_bicubic2d_aa: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}],
torch._C._nn._upsample_bicubic2d_aa: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}],
torch._C._nn.upsample_nearest1d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef<double>?'}],
torch._C._nn.upsample_nearest1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 1}],
torch._C._nn.upsample_nearest1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 1}],
torch._C._nn._upsample_nearest_exact1d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef<double>?'}],
torch._C._nn._upsample_nearest_exact1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 1}],
torch._C._nn._upsample_nearest_exact1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 1}],
torch._C._nn.upsample_nearest2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef<double>?'}],
torch._C._nn.upsample_nearest2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn.upsample_nearest2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn._upsample_nearest_exact2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef<double>?'}],
torch._C._nn._upsample_nearest_exact2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn._upsample_nearest_exact2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn.upsample_nearest3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef<double>?'}],
torch._C._nn.upsample_nearest3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}],
torch._C._nn.upsample_nearest3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}],
torch._C._nn._upsample_nearest_exact3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef<double>?'}],
torch._C._nn._upsample_nearest_exact3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}],
torch._C._nn._upsample_nearest_exact3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}],
torch._C._nn.slow_conv_transpose2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn.slow_conv_transpose2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn.slow_conv_transpose3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 3}],
torch._C._nn.slow_conv_transpose3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 3}],
torch._C._nn.thnn_conv2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn.thnn_conv2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn._conv_depthwise2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn._conv_depthwise2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn.conv_depthwise3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef', 'size': 3}],
torch._C._nn.slow_conv3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 3}],
torch._C._nn.slow_conv3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 3}],
torch._C._nn.slow_conv_dilated2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}],
torch._C._nn.slow_conv_dilated3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 3}],
torch._C._nn.col2im: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'IntArrayRef', 'size': 2}],
torch._C._nn.col2im: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'IntArrayRef', 'size': 2}],
torch._C._nn.im2col: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'IntArrayRef', 'size': 2}],
torch._C._nn.im2col: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'IntArrayRef', 'size': 2}],
torch._C._nn._test_optional_intlist: [{'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'addends', 'simple_type': 'IntArrayRef?'}],
torch._C._nn._test_optional_filled_intlist: [{'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'addends', 'simple_type': 'IntArrayRef?', 'size': 2}],
torch._C._nn._test_optional_floatlist: [{'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'addends', 'simple_type': 'ArrayRef<double>?'}],
torch._C._nn._test_string_default: [{'is_kwarg_only': 'False', 'name': 'dummy', 'simple_type': 'Tensor'}],
torch._C._nn._test_ambiguous_defaults: [{'is_kwarg_only': 'False', 'name': 'dummy', 'simple_type': 'Tensor'}],
torch._C._nn._test_ambiguous_defaults: [{'is_kwarg_only': 'False', 'name': 'dummy', 'simple_type': 'Tensor'}],
torch._C._nn._test_warn_in_autograd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._nn.pad_sequence: [{'is_kwarg_only': 'False', 'name': 'sequences', 'simple_type': 'TensorList'}],
torch._C._nn.flatten_dense_tensors: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._nn.unflatten_dense_tensors: [{'is_kwarg_only': 'False', 'name': 'flat', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._nn.scaled_dot_product_attention: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}],
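# Entries for the torch._C._linalg namespace follow, in the same per-overload format;
# functions with several overloads therefore appear more than once, sometimes with
# identical required-parameter lists.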
torch._C._linalg.linalg_diagonal: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_solve_triangular: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'upper', 'simple_type': 'bool'}],
torch._C._linalg.linalg_solve_triangular: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'upper', 'simple_type': 'bool'}],
torch._C._linalg.linalg_vander: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_cholesky_ex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_cholesky_ex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_cholesky: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_cholesky: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_cross: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_cross: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_lu_factor: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_lu_factor: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_lu_factor_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_lu_factor_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_lu: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_lu: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_lu_solve: [{'is_kwarg_only': 'False', 'name': 'LU', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pivots', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_lu_solve: [{'is_kwarg_only': 'False', 'name': 'LU', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pivots', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_det: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_det: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_ldl_factor_ex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_ldl_factor_ex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_ldl_factor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_ldl_factor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_ldl_solve: [{'is_kwarg_only': 'False', 'name': 'LD', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pivots', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_ldl_solve: [{'is_kwarg_only': 'False', 'name': 'LD', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pivots', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_lstsq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_lstsq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_matmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_matmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_vecdot: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_vecdot: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_matrix_exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_slogdet: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_slogdet: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_eig: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_eig: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg._linalg_eigvals: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_eigvals: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_eigvals: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_eigh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_eigh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_eigvalsh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_eigvalsh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_householder_product: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tau', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_householder_product: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tau', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_inv_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_inv_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_inv: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_inv: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ord', 'simple_type': 'c10::string_view'}],
torch._C._linalg.linalg_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ord', 'simple_type': 'c10::string_view'}],
torch._C._linalg.linalg_vector_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_vector_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_matrix_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ord', 'simple_type': 'Scalar'}],
torch._C._linalg.linalg_matrix_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ord', 'simple_type': 'Scalar'}],
torch._C._linalg.linalg_matrix_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_matrix_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_svd: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_svd: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_svdvals: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_svdvals: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_cond: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_cond: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_cond: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'c10::string_view'}],
torch._C._linalg.linalg_cond: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'c10::string_view'}],
torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'rcond', 'simple_type': 'double'}],
torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'rcond', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'rcond', 'simple_type': 'double'}],
torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'rcond', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_solve_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_solve_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_solve: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_solve: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_tensorinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_tensorinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_tensorsolve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_tensorsolve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_qr: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_qr: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_matrix_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}],
torch._C._linalg.linalg_matrix_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}],
torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tol', 'simple_type': 'double'}],
torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tol', 'simple_type': 'double'}],
torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tol', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tol', 'simple_type': 'Tensor'}],
torch._C._linalg.linalg_multi_dot: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
torch._C._linalg.linalg_multi_dot: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}],
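# Entries for the torch._C._special namespace follow, again one record per overload.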
torch._C._special.special_entr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_entr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_ndtri: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_ndtri: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_log_ndtr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_log_ndtr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_expm1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_expm1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_exp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_exp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_psi: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_psi: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_digamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_digamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_gammaln: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_gammaln: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_erf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_erf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_erfc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_erfc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_erfcx: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_erfcx: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_erfinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_erfinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_ndtr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_ndtr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_xlog1py: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._special.special_xlog1py: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._special.special_xlog1py: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._special.special_xlog1py: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._special.special_xlog1py: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._special.special_xlog1py: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._special.special_xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._special.special_xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._special.special_xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._special.special_xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._special.special_xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._special.special_xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._special.special_zeta: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._special.special_zeta: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._special.special_zeta: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._special.special_zeta: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._special.special_zeta: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._special.special_zeta: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch._C._special.special_i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_i0e: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_i0e: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_i1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_i1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_i1e: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_i1e: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_logit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_logit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_polygamma: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_polygamma: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}],
torch._C._special.special_logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}],
torch._C._special.special_expit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_expit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_sinc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_sinc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_log1p: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_log1p: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._special.special_gammainc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._special.special_gammainc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._special.special_gammaincc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._special.special_gammaincc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch._C._special.special_multigammaln: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'int64_t'}],
torch._C._special.special_multigammaln: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'int64_t'}],
torch._C._special.special_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch._C._special.special_airy_ai: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}],
torch._C._special.special_airy_ai: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}],
torch._C._special.special_bessel_j0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_bessel_j0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_bessel_j1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_bessel_j1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_bessel_y0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_bessel_y0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_bessel_y1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_bessel_y1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_hermite_polynomial_h: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_hermite_polynomial_h: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_hermite_polynomial_h: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_hermite_polynomial_h: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_hermite_polynomial_h: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_hermite_polynomial_h: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_hermite_polynomial_he: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_hermite_polynomial_he: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_hermite_polynomial_he: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_hermite_polynomial_he: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_hermite_polynomial_he: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_hermite_polynomial_he: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_laguerre_polynomial_l: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_laguerre_polynomial_l: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_laguerre_polynomial_l: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_laguerre_polynomial_l: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_laguerre_polynomial_l: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_laguerre_polynomial_l: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_legendre_polynomial_p: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_legendre_polynomial_p: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_legendre_polynomial_p: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_legendre_polynomial_p: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_legendre_polynomial_p: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_legendre_polynomial_p: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_modified_bessel_i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_modified_bessel_i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_modified_bessel_i1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_modified_bessel_i1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_modified_bessel_k0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_modified_bessel_k0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_modified_bessel_k1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_modified_bessel_k1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._special.special_scaled_modified_bessel_k0: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}],
torch._C._special.special_scaled_modified_bessel_k0: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}],
torch._C._special.special_scaled_modified_bessel_k1: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}],
torch._C._special.special_scaled_modified_bessel_k1: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}],
torch._C._special.special_shifted_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_shifted_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_shifted_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_shifted_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_shifted_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_shifted_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_shifted_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_shifted_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_shifted_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_shifted_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_shifted_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_shifted_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_shifted_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_shifted_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_shifted_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_shifted_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_shifted_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_shifted_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_shifted_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_shifted_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_shifted_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_shifted_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_shifted_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}],
torch._C._special.special_shifted_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}],
torch._C._special.special_spherical_bessel_j0: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}],
torch._C._special.special_spherical_bessel_j0: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}],
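# The entries below cover the torch._C._fft namespace (torch.fft functions).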
torch._C._fft.fft_fft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_fft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_ifft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_ifft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_rfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_rfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_irfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_irfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_hfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_hfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_ihfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_ihfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_fft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_fft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_ifft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_ifft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_rfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_rfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_irfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_irfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_hfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_hfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_ihfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_ihfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_fftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_fftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_ifftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_ifftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_rfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_rfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_irfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_irfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_hfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_hfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_ihfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_ihfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_fftfreq: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}],
torch._C._fft.fft_fftfreq: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}],
torch._C._fft.fft_rfftfreq: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}],
torch._C._fft.fft_rfftfreq: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}],
torch._C._fft.fft_fftshift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch._C._fft.fft_ifftshift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
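# The entries below cover torch.Tensor method signatures.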
torch.Tensor.retain_grad: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.rename_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'names', 'simple_type': 'DimnameList?'}],
torch.Tensor.rename: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'names', 'simple_type': 'DimnameList?'}],
torch.Tensor.align_to: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'names', 'simple_type': 'DimnameList'}],
torch.Tensor.align_to: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'order', 'simple_type': 'DimnameList'}, {'is_kwarg_only': 'False', 'name': 'ellipsis_idx', 'simple_type': 'int64_t'}],
torch.Tensor.align_as: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.refine_names: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'names', 'simple_type': 'DimnameList'}],
torch.Tensor.abs: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.abs_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.absolute: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.absolute_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.angle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.sgn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.sgn_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.chalf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._conj_physical: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.conj_physical: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.conj_physical_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.resolve_conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.resolve_neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._neg_view: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.acos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.acos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.arccos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.arccos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.addmv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}],
torch.Tensor.addmv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}],
torch.Tensor.addr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}],
torch.Tensor.addr_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}],
torch.Tensor._is_all_true: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._is_any_true: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.allclose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.argmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.argmin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.acosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.acosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.arccosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.arccosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.asinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.asinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.arcsinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.arcsinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.atanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.atanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.arctanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.arctanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.as_strided: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.as_strided_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.asin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.asin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.arcsin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.arcsin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.atan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.atan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.arctan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.arctan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.baddbmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}],
torch.Tensor.baddbmm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}],
torch.Tensor.bernoulli: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.bernoulli: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}],
torch.Tensor.bernoulli_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Tensor'}],
torch.Tensor.bernoulli_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.bincount: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.bitwise_not: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.bitwise_not_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.copysign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.copysign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.copysign_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.copysign_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor._lazy_clone: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.logical_not: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.logical_not_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.logical_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.logical_xor_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.logical_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.logical_and_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.logical_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.logical_or_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.bmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch.Tensor.broadcast_to: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.ceil: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.ceil_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.unsafe_chunk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'chunks', 'simple_type': 'int64_t'}],
torch.Tensor.chunk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'chunks', 'simple_type': 'int64_t'}],
torch.Tensor.tensor_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'SymInt'}],
torch.Tensor.tensor_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.tensor_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor_indices_or_sections', 'simple_type': 'Tensor'}],
torch.Tensor.clamp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.clamp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.clamp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.clamp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Scalar'}],
torch.Tensor.clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Tensor'}],
torch.Tensor.clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Scalar'}],
torch.Tensor.clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Tensor'}],
torch.Tensor.clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Scalar'}],
torch.Tensor.clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Tensor'}],
torch.Tensor.clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Scalar'}],
torch.Tensor.clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Tensor'}],
torch.Tensor.clip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.clip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.clip_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.clip_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.cos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.cos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.cosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.cosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.count_nonzero: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}],
torch.Tensor.count_nonzero: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.cov: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.corrcoef: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.cummax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.cummax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.cummin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.cummin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.cumprod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.cumprod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.cumprod_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.cumprod_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.cumsum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.cumsum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.cumsum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.cumsum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.diag_embed: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.diagflat: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.diagonal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.diagonal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.fill_diagonal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'fill_value', 'simple_type': 'Scalar'}],
torch.Tensor.diff: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}],
torch.Tensor.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}],
torch.Tensor.div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}],
torch.Tensor.div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}],
torch.Tensor.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}],
torch.Tensor.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}],
torch.Tensor.divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}],
torch.Tensor.divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}],
torch.Tensor.true_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.true_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.true_divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.true_divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.dot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}],
torch.Tensor.vdot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.new_empty: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.new_empty_strided: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.new_full: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'fill_value', 'simple_type': 'Scalar'}],
torch.Tensor.new_zeros: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.new_ones: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.resize_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.erf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.erf_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.erfc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.erfc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.exp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.exp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.exp2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.expm1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.expm1_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.expand: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.expand_as: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'start_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'end_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'Dimname'}],
torch.Tensor.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'start_dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'end_dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'Dimname'}],
torch.Tensor.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'DimnameList'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'Dimname'}],
torch.Tensor.unflatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'sizes', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.unflatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'sizes', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'names', 'simple_type': 'DimnameList'}],
torch.Tensor.fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch.Tensor.fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}],
torch.Tensor.floor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.floor_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.floor_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.floor_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.floor_divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.floor_divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.frac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.frac_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.gcd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.gcd_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.lcm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.lcm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.index_copy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch.Tensor.index_copy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch.Tensor.index_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch.Tensor.index_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch.Tensor.index_put_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<::std::optional<Tensor>>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}],
torch.Tensor.index_put: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<::std::optional<Tensor>>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}],
torch.Tensor.isclose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.isnan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.is_distributed: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.is_floating_point: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.is_complex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.is_conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._is_zerotensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.is_neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.isreal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.is_nonzero: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.is_same_size: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.is_signed: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.is_inference: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.kron: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.kthvalue: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'int64_t'}],
torch.Tensor.kthvalue: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.nan_to_num: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.nan_to_num_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.ldexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.ldexp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.log: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.log_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.log10: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.log10_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.log1p: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.log1p_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.log2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.log2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.logaddexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.logaddexp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.xlogy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.xlogy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}],
torch.Tensor.logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch.Tensor.matmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.matrix_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}],
torch.Tensor.matrix_exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.aminmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.amax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}],
torch.Tensor.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch.Tensor.nanmean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.amin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch.Tensor.mode: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.mode: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.mul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.multiply: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.multiply: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.multiply_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.multiply_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.mv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}],
torch.Tensor.mvlgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'int64_t'}],
torch.Tensor.mvlgamma_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'int64_t'}],
torch.Tensor.narrow_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}],
torch.Tensor.narrow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}],
torch.Tensor.narrow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}],
torch.Tensor.permute: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'IntArrayRef'}],
torch.Tensor.movedim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'IntArrayRef'}],
torch.Tensor.movedim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'int64_t'}],
torch.Tensor.moveaxis: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'IntArrayRef'}],
torch.Tensor.moveaxis: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'int64_t'}],
torch.Tensor.adjoint: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.is_pinned: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.pin_memory: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.pinverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.rad2deg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.rad2deg_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.deg2rad: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.deg2rad_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.ravel: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.reciprocal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.reciprocal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.neg_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.negative: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.negative_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.repeat: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'repeats', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.repeat_interleave: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'repeats', 'simple_type': 'Tensor'}],
torch.Tensor.repeat_interleave: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'repeats', 'simple_type': 'SymInt'}],
torch.Tensor.reshape: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'shape', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.reshape_as: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.round_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.round_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.relu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.prelu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch.Tensor.hardshrink: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.rsqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.rsqrt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'int64_t'}],
torch.Tensor.select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'SymInt'}],
torch.Tensor.sigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.sigmoid_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.logit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.logit_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.sin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.sin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.sinc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.sinc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.sinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.sinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.detach: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.detach_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.slice_inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch.Tensor.slice_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch.Tensor.select_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'SymInt'}],
torch.Tensor.diagonal_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch.Tensor.as_strided_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.smm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch.Tensor.softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.unsafe_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymInt'}],
torch.Tensor.split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymInt'}],
torch.Tensor.split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.unsafe_split_with_sizes: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_sizes', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.split_with_sizes: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_sizes', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.hsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'int64_t'}],
torch.Tensor.hsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'IntArrayRef'}],
torch.Tensor.vsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'int64_t'}],
torch.Tensor.vsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'IntArrayRef'}],
torch.Tensor.dsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'int64_t'}],
torch.Tensor.dsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'IntArrayRef'}],
torch.Tensor.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}],
torch.Tensor.squeeze_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.squeeze_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.squeeze_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}],
torch.Tensor.squeeze_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.sspaddmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch.Tensor.stft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n_fft', 'simple_type': 'int64_t'}],
torch.Tensor.stft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n_fft', 'simple_type': 'int64_t'}],
torch.Tensor.istft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n_fft', 'simple_type': 'int64_t'}],
torch.Tensor.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}],
torch.Tensor.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch.Tensor.nansum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.sum_to_size: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.sqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.sqrt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.square: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.square_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}],
torch.Tensor.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch.Tensor.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch.Tensor.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.t: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.t_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.tan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.tan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.tanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.tanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.tile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}],
torch.Tensor.transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'Dimname'}],
torch.Tensor.transpose_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}],
torch.Tensor.flip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'IntArrayRef'}],
torch.Tensor.fliplr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.flipud: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.roll: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'shifts', 'simple_type': 'SymIntArrayRef', 'size': 1}],
torch.Tensor.rot90: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._nested_tensor_size: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._nested_tensor_strides: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._nested_tensor_storage_offsets: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.trunc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.trunc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.fix: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.fix_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.type_as: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.unsqueeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.unsqueeze_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}],
torch.Tensor.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}],
torch.Tensor.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch.Tensor.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch.Tensor.view_as: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}],
torch.Tensor.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}],
torch.Tensor.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}],
torch.Tensor.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}],
torch.Tensor.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}],
torch.Tensor.frexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.clone: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.positive: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.resize_as_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'the_template', 'simple_type': 'Tensor'}],
torch.Tensor.resize_as_sparse_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'the_template', 'simple_type': 'Tensor'}],
torch.Tensor.zero_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.sub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.sub_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.subtract: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.subtract: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.subtract_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.subtract_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.heaviside: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}],
torch.Tensor.heaviside_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}],
torch.Tensor.addmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch.Tensor.addmm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch.Tensor._addmm_activation: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}],
torch.Tensor.sparse_resize_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'sparse_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dense_dim', 'simple_type': 'int64_t'}],
torch.Tensor.sparse_resize_and_clear_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'sparse_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dense_dim', 'simple_type': 'int64_t'}],
torch.Tensor.sparse_mask: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}],
torch.Tensor._sparse_mask_projection: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}],
torch.Tensor.to_dense: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._to_dense: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.sparse_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._dimI: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.dense_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._dimV: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._nnz: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.coalesce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.is_coalesced: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._values: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._coalesced_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'coalesced', 'simple_type': 'bool'}],
torch.Tensor.indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.values: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.crow_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.col_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.ccol_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.row_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.unbind: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.unbind: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.to_sparse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sparse_dim', 'simple_type': 'int64_t'}],
torch.Tensor.to_sparse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._to_sparse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sparse_dim', 'simple_type': 'int64_t'}],
torch.Tensor._to_sparse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.to_sparse_csr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._to_sparse_csr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.to_sparse_csc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._to_sparse_csc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.to_sparse_bsr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'blocksize', 'simple_type': 'IntArrayRef', 'size': 2}],
torch.Tensor._to_sparse_bsr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'blocksize', 'simple_type': 'IntArrayRef', 'size': 2}],
torch.Tensor.to_sparse_bsc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'blocksize', 'simple_type': 'IntArrayRef', 'size': 2}],
torch.Tensor._to_sparse_bsc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'blocksize', 'simple_type': 'IntArrayRef', 'size': 2}],
torch.Tensor.to_mkldnn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.dequantize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.q_scale: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.q_zero_point: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.q_per_channel_scales: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.q_per_channel_zero_points: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.q_per_channel_axis: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.int_repr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.qscheme: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor._autocast_to_reduced_precision: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cuda_enabled', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'cpu_enabled', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'cuda_dtype', 'simple_type': 'ScalarType'}, {'is_kwarg_only': 'False', 'name': 'cpu_dtype', 'simple_type': 'ScalarType'}],
torch.Tensor._autocast_to_full_precision: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cuda_enabled', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'cpu_enabled', 'simple_type': 'bool'}],
torch.Tensor.is_set_to: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}],
torch.Tensor.masked_fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch.Tensor.masked_fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}],
torch.Tensor.masked_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch.Tensor.masked_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}],
torch.Tensor.masked_scatter_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch.Tensor.masked_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch.Tensor.view: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}],
torch.Tensor.view: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}],
torch.Tensor.put_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch.Tensor.put: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch.Tensor.index_add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch.Tensor.index_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch.Tensor.index_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}],
torch.Tensor.index_reduce_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}],
torch.Tensor.index_reduce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}],
torch.Tensor.index_fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch.Tensor.index_fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}],
torch.Tensor.index_fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch.Tensor.index_fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}],
torch.Tensor.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch.Tensor.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}],
torch.Tensor.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch.Tensor.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}],
torch.Tensor.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch.Tensor.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch.Tensor.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'reduce', 'simple_type': 'c10::string_view'}],
torch.Tensor.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'reduce', 'simple_type': 'c10::string_view'}],
torch.Tensor.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch.Tensor.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch.Tensor.scatter_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch.Tensor.scatter_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch.Tensor.scatter_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch.Tensor.scatter_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}],
torch.Tensor.scatter_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch.Tensor.scatter_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch.Tensor.scatter_add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}],
torch.Tensor.scatter_reduce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}],
torch.Tensor.scatter_reduce_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}],
torch.Tensor.eq_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.eq_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.bitwise_and_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.bitwise_and_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.__and__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.__and__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.__iand__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.__iand__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.bitwise_or_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.bitwise_or_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.__or__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.__or__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.__ior__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.__ior__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.bitwise_xor_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.bitwise_xor_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.__xor__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.__xor__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.__ixor__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.__ixor__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.__lshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.__lshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.__ilshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.__ilshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.bitwise_left_shift_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.bitwise_left_shift_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.__rshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.__rshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.__irshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.__irshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.bitwise_right_shift_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.bitwise_right_shift_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.tril_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.triu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.digamma_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.lerp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Scalar'}],
torch.Tensor.lerp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch.Tensor.addbmm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}],
torch.Tensor.addbmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}],
torch.Tensor.random_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'from', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'to', 'simple_type': 'int64_t?'}],
torch.Tensor.random_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'to', 'simple_type': 'int64_t'}],
torch.Tensor.random_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.uniform_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.cauchy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.log_normal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.exponential_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.geometric_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}],
torch.Tensor.diag: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.cross: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.triu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.tril: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.trace: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.ne: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.ne: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.ne_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.ne_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.not_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.not_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.not_equal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.not_equal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.eq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.eq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.ge: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.ge: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.ge_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.ge_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.greater_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.greater_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.greater_equal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.greater_equal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.le: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.le: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.le_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.le_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.less_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.less_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.less_equal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.less_equal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.gt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.gt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.gt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.gt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.greater: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.greater: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.greater_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.greater_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.lt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.lt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.lt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.lt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.less: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.less: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.less_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.less_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.take: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}],
torch.Tensor.take_along_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}],
torch.Tensor.index_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}],
torch.Tensor.index_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}],
torch.Tensor.masked_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}],
torch.Tensor.nonzero_static: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'size', 'simple_type': 'SymInt'}],
torch.Tensor.argwhere: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.gather: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}],
torch.Tensor.gather: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}],
torch.Tensor.addcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}],
torch.Tensor.addcmul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}],
torch.Tensor.addcdiv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}],
torch.Tensor.addcdiv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}],
torch.Tensor.triangular_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}],
torch.Tensor.svd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.swapaxes: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'axis1', 'simple_type': 'int64_t'}],
torch.Tensor.swapaxes_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'axis1', 'simple_type': 'int64_t'}],
torch.Tensor.swapdims: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}],
torch.Tensor.swapdims_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}],
torch.Tensor.cholesky: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.cholesky_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}],
torch.Tensor.cholesky_inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.qr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.geqrf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.orgqr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}],
torch.Tensor.ormqr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input3', 'simple_type': 'Tensor'}],
torch.Tensor.lu_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_pivots', 'simple_type': 'Tensor'}],
torch.Tensor.multinomial: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'num_samples', 'simple_type': 'int64_t'}],
torch.Tensor.lgamma_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.lgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.digamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.polygamma: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.polygamma_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}],
torch.Tensor.erfinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.erfinv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.i0_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.sign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.sign_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.signbit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.dist: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.atan2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.atan2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.arctan2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.arctan2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Scalar'}],
torch.Tensor.lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}],
torch.Tensor.histc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.histogram: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'Tensor'}],
torch.Tensor.histogram: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.fmod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.fmod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.fmod_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.fmod_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.hypot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.hypot_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.igamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.igamma_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.igammac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.igammac_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.nextafter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.nextafter_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.remainder_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}],
torch.Tensor.remainder_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.fmin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.fmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.maximum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.minimum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.quantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}],
torch.Tensor.quantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'double'}],
torch.Tensor.nanquantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}],
torch.Tensor.nanquantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'double'}],
torch.Tensor.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool?'}],
torch.Tensor.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool?'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.msort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool'}],
torch.Tensor.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}],
torch.Tensor.topk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'SymInt'}],
torch.Tensor.renorm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'maxnorm', 'simple_type': 'Scalar'}],
torch.Tensor.renorm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'maxnorm', 'simple_type': 'Scalar'}],
torch.Tensor.unfold: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'step', 'simple_type': 'int64_t'}],
torch.Tensor.equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}],
torch.Tensor.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}],
torch.Tensor.pow_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}],
torch.Tensor.pow_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}],
torch.Tensor.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}],
torch.Tensor.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}],
torch.Tensor.float_power_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}],
torch.Tensor.float_power_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}],
torch.Tensor.normal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.isfinite: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.isinf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.record_stream: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 's', 'simple_type': 'Stream'}],
torch.Tensor.isposinf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.isneginf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.det: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.slogdet: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.logdet: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}],
torch.Tensor.inner: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
torch.Tensor.outer: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}],
torch.Tensor.ger: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}],
torch.Tensor.to_padded_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'double'}],
}
```
|
=========================================================================================================================
SOURCE CODE FILE: hop_db.py
LINES: 1
SIZE: 13.97 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\hop_db.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import functools
import unittest
import torch
from functorch.experimental.control_flow import map
from torch.nn.attention.flex_attention import _create_empty_block_mask, flex_attention
from torch.testing import make_tensor
from torch.testing._internal.common_device_type import onlyCUDA
from torch.testing._internal.common_dtype import all_types_and, custom_types
from torch.testing._internal.opinfo.core import DecorateInfo, OpInfo, SampleInput
from torch._higher_order_ops.invoke_subgraph import mark_compile_region
from torch._higher_order_ops import InvokeQuant, invoke_quant_packed
def sample_inputs_map(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = functools.partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
yield SampleInput(
[make_arg(2, 2, 2, low=0.1, high=2), make_arg(2, 2, 2, low=0.1, high=2)],
args=(make_arg(1, low=0.1, high=2), make_arg(1, low=0.1, high=2)),
)
def inner_f(x, y0, y1):
return [x[0].cos().add_(1.0) * y0, (x[1] + y1.sin()).cos_().view(x[1].size())]
def simple_map(xs, y0, y1):
def f(x, y0, y1):
return inner_f(x, y0, y1)
return map(f, xs, y0, y1)
def nested_map(xs, y0, y1):
def f1(xx, y0, y1):
def f2(x, y0, y1):
return inner_f(x, y0, y1)
return map(f2, xx, y0, y1)
return map(f1, xs, y0, y1)
def triple_nested_map(xs, y0, y1):
def f0(xs, y0, y1):
def f1(xx, y0, y1):
def f2(x, y0, y1):
return inner_f(x, y0, y1)
return map(f2, xx, y0, y1)
return map(f1, xs, y0, y1)
return map(f0, xs, y0, y1)
# PLEASE DON'T ADD ANYTHING NEW TO THIS LIST,
# and do add an OpInfo for your HOP.
# The OpInfo lets us do automated testing for the HOP to check that
# your HOP will work correctly with PyTorch!
#
# Your new HOP may fail some automated testing. That's OK. If you don't
# care about certain features (like torch.export), it's fine to xfail those
# failing tests. It is less fine to xfail a more critical check (like checking
# if torch.compile works with your HOP, or if your HOP has a docstring).
# If you don't know if a test is fine to xfail, please ask.
#
# There are legitimate reasons why something cannot be added to this list
# (e.g. it uses executorch which is not in PyTorch). If that's the case then
# please leave a comment.
FIXME_hop_that_doesnt_have_opinfo_test_allowlist = [
"custom_function_call",
"autograd_function_apply",
"run_and_save_rng_state",
"run_with_rng_state",
"graphsafe_run_with_rng_state",
"out_dtype",
"trace_wrapped",
'tag_activation_checkpoint',
'executorch_call_delegate',
'wrap',
'wrap_with_set_grad_enabled',
'auto_functionalized_v2',
'associative_scan',
'flat_apply', # is WIP, doesn't pass any of the tests yet
'wrap_with_autocast',
'wrap_activation_checkpoint',
'run_const_graph',
'auto_functionalized',
"map", # T183144629
"map_impl",
"with_effects",
"strict_mode",
"_export_tracepoint",
"call_torchbind",
"triton_kernel_wrapper_mutation",
"triton_kernel_wrapper_functional",
"hints_wrapper",
"foreach_map",
"aoti_call_delegate",
]
torch.library.define(
"testlib::mutating_custom_op",
"(Tensor(a!) x, Tensor(b!) z) -> (Tensor, Tensor, Tensor)",
tags=torch.Tag.pt2_compliant_tag,
)
@torch.library.impl("testlib::mutating_custom_op", "cpu")
def foo_impl_cpu(x, z):
x.add_(5)
z.add_(5)
return x, z, x + z
@torch.library.impl("testlib::mutating_custom_op", "cuda")
def foo_impl_cuda(x, z):
x.add_(5)
z.add_(5)
return x, z, x + z
@torch.library.register_fake("testlib::mutating_custom_op")
def foo_impl_abstract(x, z):
return x, z, x + z
def sample_inputs_cond(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = functools.partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
yield SampleInput(make_arg(2, 2, 2, low=0.1, high=2))
def simple_cond(x):
return torch.cond(x.sum() > 2, lambda x: (x.cos(),), lambda x: (x.sin(),), [x])
def sample_inputs_invoke_subgraph(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = functools.partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
yield SampleInput(make_arg(2, 2, 2, low=0.1, high=2))
@mark_compile_region
def fn_for_invoke_subgraph(x):
return torch.sin(x)
def simple_invoke_subgraph(x):
return fn_for_invoke_subgraph(x)
def sample_inputs_auto_functionalize(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = functools.partial(
make_tensor, device=device, dtype=dtype, requires_grad=False
)
yield SampleInput(
make_arg(2, 2, 2, low=0.1, high=2), make_arg(2, 2, 2, low=0.1, high=2)
)
def simple_auto_functionalize(x, z):
return torch.ops.testlib.mutating_custom_op(x, z)
def sample_inputs_flex_attention(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = functools.partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
def score_mod(score, b, h, m, n):
return score + h
q, k, v = (make_arg(2, 2, 128, 8, low=0.1, high=2) for _ in range(3))
block_mask = _create_empty_block_mask(q, k)
yield SampleInput(q, k, v, score_mod, block_mask)
def sample_inputs_while_loop(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = functools.partial(
make_tensor, device=device, dtype=dtype, requires_grad=False
)
yield SampleInput(
torch.tensor(3),
make_arg(2, 3, 4, low=0.1, high=2),
)
def simple_while_loop(iter_t, x):
def cond_fn(iter_t, x):
return iter_t > 0
def body_fn(iter_t, x):
return iter_t - 1, x.cos()
return torch._higher_order_ops.while_loop(cond_fn, body_fn, (iter_t, x))
def sample_inputs_scan(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = functools.partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
yield SampleInput(
make_arg(2, 2, low=0.1, high=2),
make_arg(2, 2, 2, low=0.1, high=2),
)
def simple_scan(init, xs):
def combine_fn(carry, x):
result = carry @ x + x
return result, carry.clone()
return torch._higher_order_ops.scan(combine_fn, init, xs)
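# Editor's note (illustrative, not part of the original file): `scan` folds
# `combine_fn` over the leading dimension of `xs`. With the sample above
# (init of shape (2, 2), xs of shape (2, 2, 2)), `simple_scan` is expected to
# return the final carry of shape (2, 2) plus the per-step outputs (clones of
# the previous carry) stacked into a tensor of shape (2, 2, 2).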
quant_tracer = InvokeQuant()
def simple_invoke_quant(x):
def fn(x, y):
return (torch.sin(x) * y,)
return quant_tracer(fn, x, x)[0] * 2.
def simple_invoke_quant_packed(x):
def fn(x):
return (torch.sin(x),)
return invoke_quant_packed(fn, x)[0] * 2.
hop_db = [
OpInfo(
name="scan",
variant_test_name="simple",
op=simple_scan,
sample_inputs_func=sample_inputs_scan,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
check_batched_forward_grad=False,
check_inplace_batched_forward_grad=False,
supports_autograd=False,
# "torch.compile with aot_autograd does not currently support double backward."
supports_gradgrad=False,
),
OpInfo(
name="invoke_subgraph",
variant_test_name="simple",
op=simple_invoke_subgraph,
sample_inputs_func=sample_inputs_invoke_subgraph,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
check_batched_forward_grad=False,
check_inplace_batched_forward_grad=False,
supports_autograd=True,
# "torch.compile with aot_autograd does not currently support double backward."
supports_gradgrad=False,
),
OpInfo(
name="map",
variant_test_name="simple",
op=simple_map,
sample_inputs_func=sample_inputs_map,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
check_batched_forward_grad=False,
check_inplace_batched_forward_grad=False,
),
OpInfo(
name="map",
variant_test_name="nested",
op=nested_map,
sample_inputs_func=sample_inputs_map,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
check_batched_forward_grad=False,
check_inplace_batched_forward_grad=False,
),
OpInfo(
name="map",
variant_test_name="triple_nested",
op=triple_nested_map,
sample_inputs_func=sample_inputs_map,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
check_batched_forward_grad=False,
check_inplace_batched_forward_grad=False,
),
OpInfo(
name="cond",
variant_test_name="simple",
op=simple_cond,
sample_inputs_func=sample_inputs_cond,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
check_batched_forward_grad=False,
check_inplace_batched_forward_grad=False,
supports_autograd=True,
# "torch.compile with aot_autograd does not currently support double backward."
supports_gradgrad=False,
),
OpInfo(
name="invoke_quant",
variant_test_name="simple",
op=simple_invoke_quant,
sample_inputs_func=sample_inputs_invoke_subgraph,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
check_batched_forward_grad=False,
check_inplace_batched_forward_grad=False,
supports_autograd=True,
# "torch.compile with aot_autograd does not currently support double backward."
skips=(
DecorateInfo(unittest.expectedFailure, "TestHOP", "test_aot_export"),
DecorateInfo(
unittest.expectedFailure, "TestHOP", "test_pre_dispatch_export"
),
DecorateInfo(unittest.expectedFailure, "TestHOP", "test_serialize_export"),
DecorateInfo(unittest.expectedFailure, "TestHOP", "test_retrace_export"),
),
# "torch.compile with aot_autograd does not currently support double backward."
supports_gradgrad=False,
),
OpInfo(
name="invoke_quant_packed",
variant_test_name="simple",
op=simple_invoke_quant_packed,
sample_inputs_func=sample_inputs_invoke_subgraph,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
check_batched_forward_grad=False,
check_inplace_batched_forward_grad=False,
supports_autograd=True,
# "torch.compile with aot_autograd does not currently support double backward."
supports_gradgrad=False,
),
OpInfo(
name="while_loop",
variant_test_name="simple",
op=simple_while_loop,
sample_inputs_func=sample_inputs_while_loop,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
check_batched_forward_grad=False,
check_inplace_batched_forward_grad=False,
supports_autograd=False,
),
OpInfo(
name="auto_functionalize",
variant_test_name="simple",
op=simple_auto_functionalize,
sample_inputs_func=sample_inputs_auto_functionalize,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
check_batched_forward_grad=False,
check_inplace_batched_forward_grad=False,
supports_autograd=False,
),
OpInfo(
name="flex_attention",
variant_test_name="simple",
op=flex_attention,
sample_inputs_func=sample_inputs_flex_attention,
dtypes=custom_types(torch.float16, torch.float32),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
check_batched_forward_grad=False,
check_inplace_batched_forward_grad=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestHOP", "test_aot_export"),
DecorateInfo(
unittest.expectedFailure, "TestHOP", "test_pre_dispatch_export"
),
DecorateInfo(unittest.expectedFailure, "TestHOP", "test_serialize_export"),
DecorateInfo(unittest.expectedFailure, "TestHOP", "test_retrace_export"),
),
decorators=[onlyCUDA],
),
OpInfo(
name="flex_attention_backward",
variant_test_name="simple",
op=flex_attention,
sample_inputs_func=sample_inputs_flex_attention,
dtypes=custom_types(torch.float16, torch.float32),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
check_batched_forward_grad=False,
check_inplace_batched_forward_grad=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestHOP", "test_aot_export"),
DecorateInfo(
unittest.expectedFailure, "TestHOP", "test_pre_dispatch_export"
),
DecorateInfo(unittest.expectedFailure, "TestHOP", "test_serialize_export"),
DecorateInfo(unittest.expectedFailure, "TestHOP", "test_retrace_export"),
),
decorators=[onlyCUDA],
),
]
```
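For context, here is a minimal sketch (an editor's illustration, not part of the original file) of how the `hop_db` entries might be exercised. Some HOPs may only run under `torch.compile` or on CUDA, so treat this as a structural sketch rather than a guaranteed-passing loop.
```py
# Editor's sketch: iterate hop_db and invoke each HOP wrapper on its sample inputs.
import torch
from torch.testing._internal.hop_db import hop_db

def smoke_test_hops(device="cpu", dtype=torch.float32):
    for opinfo in hop_db:
        if "flex_attention" in opinfo.name:
            continue  # marked onlyCUDA above; skipped in this CPU-oriented sketch
        for sample in opinfo.sample_inputs(device, dtype, requires_grad=False):
            # OpInfo convention: the first input plus positional/keyword extras.
            opinfo.op(sample.input, *sample.args, **sample.kwargs)
```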
|
===================================================================================================================================
SOURCE CODE FILE: hypothesis_utils.py
LINES: 1
SIZE: 14.64 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\hypothesis_utils.py
ENCODING: utf-8
```py
# mypy: ignore-errors
from collections import defaultdict
from collections.abc import Iterable
import numpy as np
import torch
import hypothesis
from functools import reduce
from hypothesis import assume
from hypothesis import settings
from hypothesis import strategies as st
from hypothesis.extra import numpy as stnp
from hypothesis.strategies import SearchStrategy
from torch.testing._internal.common_quantized import _calculate_dynamic_qparams, _calculate_dynamic_per_channel_qparams
# Setup for the hypothesis tests.
# The tuples are (torch_quantized_dtype, zero_point_enforce), where the last
# element is the enforced zero_point. If None, any zero_point within the
# range of the data type is OK.
# Tuple with all quantized data types.
_ALL_QINT_TYPES = (
torch.quint8,
torch.qint8,
torch.qint32,
)
# Enforced zero point for every quantized data type.
# If None, any zero_point within the range of the data type is OK.
_ENFORCED_ZERO_POINT = defaultdict(lambda: None, {
torch.quint8: None,
torch.qint8: None,
torch.qint32: 0
})
def _get_valid_min_max(qparams):
scale, zero_point, _quantized_type = qparams
adjustment = 1 + torch.finfo(torch.float).eps
_long_type_info = torch.iinfo(torch.long)
long_min, long_max = _long_type_info.min / adjustment, _long_type_info.max / adjustment
# make sure intermediate results are within the range of long
min_value = max((long_min - zero_point) * scale, (long_min / scale + zero_point))
max_value = min((long_max - zero_point) * scale, (long_max / scale + zero_point))
return np.float32(min_value), np.float32(max_value)
# This wrapper wraps `st.floats` and checks the version of `hypothesis`; if it
# is too old, it removes the `width` parameter (which was introduced in 3.67.0).
def _floats_wrapper(*args, **kwargs):
if 'width' in kwargs and hypothesis.version.__version_info__ < (3, 67, 0):
# As long as nan, inf, min, max are not specified, reimplement the width
# parameter for older versions of hypothesis.
no_nan_and_inf = (
(('allow_nan' in kwargs and not kwargs['allow_nan']) or
'allow_nan' not in kwargs) and
(('allow_infinity' in kwargs and not kwargs['allow_infinity']) or
'allow_infinity' not in kwargs))
min_and_max_not_specified = (
len(args) == 0 and
'min_value' not in kwargs and
'max_value' not in kwargs
)
if no_nan_and_inf and min_and_max_not_specified:
if kwargs['width'] == 16:
kwargs['min_value'] = torch.finfo(torch.float16).min
kwargs['max_value'] = torch.finfo(torch.float16).max
elif kwargs['width'] == 32:
kwargs['min_value'] = torch.finfo(torch.float32).min
kwargs['max_value'] = torch.finfo(torch.float32).max
elif kwargs['width'] == 64:
kwargs['min_value'] = torch.finfo(torch.float64).min
kwargs['max_value'] = torch.finfo(torch.float64).max
kwargs.pop('width')
return st.floats(*args, **kwargs)
def floats(*args, **kwargs):
if 'width' not in kwargs:
kwargs['width'] = 32
return _floats_wrapper(*args, **kwargs)
"""Hypothesis filter to avoid overflows with quantized tensors.
Args:
tensor: Tensor of floats to filter
qparams: Quantization parameters as returned by the `qparams` strategy.
Returns:
True
Raises:
hypothesis.UnsatisfiedAssumption
Note: This filter is slow. Use it only when filtering of the test cases is
absolutely necessary!
"""
def assume_not_overflowing(tensor, qparams):
min_value, max_value = _get_valid_min_max(qparams)
assume(tensor.min() >= min_value)
assume(tensor.max() <= max_value)
return True
"""Strategy for generating the quantization parameters.
Args:
dtypes: quantized data types to sample from.
scale_min / scale_max: Min and max scales. If None, set to 1e-3 / 1e3.
zero_point_min / zero_point_max: Min and max for the zero point. If None,
set to the minimum and maximum of the quantized data type.
Note: The min and max are only valid if the zero_point is not enforced
by the data type itself.
Generates:
scale: Sampled scale.
zero_point: Sampled zero point.
quantized_type: Sampled quantized type.
"""
@st.composite
def qparams(draw, dtypes=None, scale_min=None, scale_max=None,
zero_point_min=None, zero_point_max=None):
if dtypes is None:
dtypes = _ALL_QINT_TYPES
if not isinstance(dtypes, (list, tuple)):
dtypes = (dtypes,)
quantized_type = draw(st.sampled_from(dtypes))
_type_info = torch.iinfo(quantized_type)
qmin, qmax = _type_info.min, _type_info.max
# TODO: Maybe embed the enforced zero_point in the `torch.iinfo`.
_zp_enforced = _ENFORCED_ZERO_POINT[quantized_type]
if _zp_enforced is not None:
zero_point = _zp_enforced
else:
_zp_min = qmin if zero_point_min is None else zero_point_min
_zp_max = qmax if zero_point_max is None else zero_point_max
zero_point = draw(st.integers(min_value=_zp_min, max_value=_zp_max))
if scale_min is None:
scale_min = torch.finfo(torch.float).eps
if scale_max is None:
scale_max = torch.finfo(torch.float).max
scale = draw(floats(min_value=scale_min, max_value=scale_max, width=32))
return scale, zero_point, quantized_type
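# Editor's sketch (hypothetical usage, not part of the original module); a test
# would additionally need `from hypothesis import given`:
#
#     @given(qp=qparams(dtypes=torch.qint8, scale_min=1e-3, scale_max=1e3))
#     def test_qparams_example(qp):
#         scale, zero_point, qtype = qp
#         assert qtype == torch.qint8
#         assert torch.iinfo(qtype).min <= zero_point <= torch.iinfo(qtype).max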
"""Strategy to create different shapes.
Args:
min_dims / max_dims: minimum and maximum rank.
min_side / max_side: minimum and maximum dimensions per rank.
Generates:
Possible shapes for a tensor, constrained to the rank and dimensionality.
Example:
# Generates 3D and 4D tensors.
@given(Q = qtensor(shapes=array_shapes(min_dims=3, max_dims=4))
some_test(self, Q):...
"""
@st.composite
def array_shapes(draw, min_dims=1, max_dims=None, min_side=1, max_side=None, max_numel=None):
"""Return a strategy for array shapes (tuples of int >= 1)."""
assert min_dims < 32
if max_dims is None:
max_dims = min(min_dims + 2, 32)
assert max_dims < 32
if max_side is None:
max_side = min_side + 5
candidate = st.lists(st.integers(min_side, max_side), min_size=min_dims, max_size=max_dims)
if max_numel is not None:
candidate = candidate.filter(lambda x: reduce(int.__mul__, x, 1) <= max_numel)
return draw(candidate.map(tuple))
"""Strategy for generating test cases for tensors.
The resulting tensor is in float32 format.
Args:
shapes: Shapes under test for the tensor. Could be either a hypothesis
strategy, or an iterable of different shapes to sample from.
elements: Elements to generate from for the returned data type.
If None, the strategy resolves to float within range [-1e6, 1e6].
qparams: Instance of the qparams strategy. This is used to filter the tensor
such that overflow does not happen.
Generates:
X: Tensor of type float32. Note that NaN and +/-inf is not included.
qparams: (If `qparams` arg is set) Quantization parameters for X.
The returned parameters are `(scale, zero_point, quantization_type)`.
(If `qparams` arg is None), returns None.
"""
@st.composite
def tensor(draw, shapes=None, elements=None, qparams=None, dtype=np.float32):
if isinstance(shapes, SearchStrategy):
_shape = draw(shapes)
else:
_shape = draw(st.sampled_from(shapes))
if qparams is None:
if elements is None:
elements = floats(-1e6, 1e6, allow_nan=False, width=32)
X = draw(stnp.arrays(dtype=dtype, elements=elements, shape=_shape))
assume(not (np.isnan(X).any() or np.isinf(X).any()))
return X, None
qparams = draw(qparams)
if elements is None:
min_value, max_value = _get_valid_min_max(qparams)
elements = floats(min_value, max_value, allow_infinity=False,
allow_nan=False, width=32)
X = draw(stnp.arrays(dtype=dtype, elements=elements, shape=_shape))
# Recompute the scale and zero_points according to the X statistics.
scale, zp = _calculate_dynamic_qparams(X, qparams[2])
enforced_zp = _ENFORCED_ZERO_POINT.get(qparams[2], None)
if enforced_zp is not None:
zp = enforced_zp
return X, (scale, zp, qparams[2])
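# Editor's sketch (hypothetical usage, not part of the original module): drawing
# a float32 array together with matching quantization parameters and quantizing it.
#
#     @given(X_qp=tensor(shapes=((3, 4), (2, 5)), qparams=qparams()))
#     def test_tensor_example(X_qp):
#         X, (scale, zero_point, qtype) = X_qp
#         qX = torch.quantize_per_tensor(torch.from_numpy(X), scale, zero_point, qtype)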
@st.composite
def per_channel_tensor(draw, shapes=None, elements=None, qparams=None):
if isinstance(shapes, SearchStrategy):
_shape = draw(shapes)
else:
_shape = draw(st.sampled_from(shapes))
if qparams is None:
if elements is None:
elements = floats(-1e6, 1e6, allow_nan=False, width=32)
X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=_shape))
assume(not (np.isnan(X).any() or np.isinf(X).any()))
return X, None
qparams = draw(qparams)
if elements is None:
min_value, max_value = _get_valid_min_max(qparams)
elements = floats(min_value, max_value, allow_infinity=False,
allow_nan=False, width=32)
X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=_shape))
# Recompute the scale and zero_points according to the X statistics.
scale, zp = _calculate_dynamic_per_channel_qparams(X, qparams[2])
enforced_zp = _ENFORCED_ZERO_POINT.get(qparams[2], None)
if enforced_zp is not None:
zp = enforced_zp
# Permute to model quantization along an axis
axis = int(np.random.randint(0, X.ndim, 1))
permute_axes = np.arange(X.ndim)
permute_axes[0] = axis
permute_axes[axis] = 0
X = np.transpose(X, permute_axes)
return X, (scale, zp, axis, qparams[2])
"""Strategy for generating test cases for tensors used in Conv.
The resulting tensors are in float32 format.
Args:
spatial_dim: Spatial Dim for feature maps. If given as an iterable, randomly
picks one from the pool to make it the spatial dimension
batch_size_range: Range to generate `batch_size`.
Must be tuple of `(min, max)`.
input_channels_per_group_range:
Range to generate `input_channels_per_group`.
Must be tuple of `(min, max)`.
output_channels_per_group_range:
Range to generate `output_channels_per_group`.
Must be tuple of `(min, max)`.
feature_map_range: Range to generate feature map size for each spatial_dim.
Must be tuple of `(min, max)`.
kernel_range: Range to generate kernel size for each spatial_dim. Must be
tuple of `(min, max)`.
max_groups: Maximum number of groups to generate.
elements: Elements to generate from for the returned data type.
If None, the strategy resolves to float within range [-1e6, 1e6].
qparams: Strategy for quantization parameters. for X, w, and b.
Could be either a single strategy (used for all) or a list of
three strategies for X, w, b.
Generates:
(X, W, b, g): Tensors of type `float32` of the following drawn shapes:
X: `(batch_size, input_channels, H, W)`
W: `(output_channels, input_channels_per_group) + kernel_shape`
b: `(output_channels,)`
groups: Number of groups the input is divided into
Note: X, W, b are tuples of (Tensor, qparams), where qparams could be either
None or (scale, zero_point, quantized_type)
Example:
@given(tensor_conv(
spatial_dim=2,
batch_size_range=(1, 3),
input_channels_per_group_range=(1, 7),
output_channels_per_group_range=(1, 7),
feature_map_range=(6, 12),
kernel_range=(3, 5),
max_groups=4,
elements=st.floats(-1.0, 1.0),
qparams=qparams()
))
"""
@st.composite
def tensor_conv(
draw, spatial_dim=2, batch_size_range=(1, 4),
input_channels_per_group_range=(3, 7),
output_channels_per_group_range=(3, 7), feature_map_range=(6, 12),
kernel_range=(3, 7), max_groups=1, can_be_transposed=False,
elements=None, qparams=None
):
# Resolve the minibatch, in_channels, out_channels, iH/iW, iK/iW
batch_size = draw(st.integers(*batch_size_range))
input_channels_per_group = draw(
st.integers(*input_channels_per_group_range))
output_channels_per_group = draw(
st.integers(*output_channels_per_group_range))
groups = draw(st.integers(1, max_groups))
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
if isinstance(spatial_dim, Iterable):
spatial_dim = draw(st.sampled_from(spatial_dim))
feature_map_shape = [draw(st.integers(*feature_map_range)) for _ in range(spatial_dim)]
kernels = [draw(st.integers(*kernel_range)) for _ in range(spatial_dim)]
tr = False
weight_shape = (output_channels, input_channels_per_group) + tuple(kernels)
bias_shape = output_channels
if can_be_transposed:
tr = draw(st.booleans())
if tr:
weight_shape = (input_channels, output_channels_per_group) + tuple(kernels)
bias_shape = output_channels
# Resolve the tensors
if qparams is not None:
if isinstance(qparams, (list, tuple)):
assert len(qparams) == 3, "Need 3 qparams for X, w, b"
else:
qparams = [qparams] * 3
X = draw(tensor(shapes=(
(batch_size, input_channels) + tuple(feature_map_shape),),
elements=elements, qparams=qparams[0]))
W = draw(tensor(shapes=(weight_shape,), elements=elements,
qparams=qparams[1]))
b = draw(tensor(shapes=(bias_shape,), elements=elements,
qparams=qparams[2]))
return X, W, b, groups, tr
# We set the deadline in the currently loaded profile.
# Creating (and loading) a separate profile overrides any settings the user
# already specified.
hypothesis_version = hypothesis.version.__version_info__
current_settings = settings._profiles[settings._current_profile].__dict__
current_settings['deadline'] = None
if hypothesis_version >= (3, 16, 0) and hypothesis_version < (5, 0, 0):
current_settings['timeout'] = hypothesis.unlimited
def assert_deadline_disabled():
if hypothesis_version < (3, 27, 0):
import warnings
warning_message = (
"Your version of hypothesis is outdated. "
"To avoid `DeadlineExceeded` errors, please update. "
f"Current hypothesis version: {hypothesis.__version__}"
)
warnings.warn(warning_message)
else:
assert settings().deadline is None
```
|
=================================================================================================================================
SOURCE CODE FILE: inductor_utils.py
LINES: 5
SIZE: 6.17 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\inductor_utils.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import logging
import torch
import re
import unittest
import functools
import contextlib
import os
from subprocess import CalledProcessError
import sys
import torch._inductor.async_compile # noqa: F401 required to warm up AsyncCompile pools
from torch.fx.experimental.proxy_tensor import make_fx
from torch._inductor.graph import GraphLowering
from torch._inductor.compile_fx import shape_env_from_inputs
from torch._inductor.codecache import CppCodeCache
from torch._inductor.utils import get_gpu_shared_memory, is_big_gpu
from torch._inductor.utils import GPU_TYPES, get_gpu_type
from torch.utils._triton import has_triton
from torch.testing._internal.common_utils import (
LazyVal,
IS_FBCODE,
)
from torch.testing._internal.common_utils import (
TestCase,
IS_CI,
IS_WINDOWS,
)
log: logging.Logger = logging.getLogger(__name__)
def test_cpu():
try:
CppCodeCache.load("")
return not IS_FBCODE
except (
CalledProcessError,
OSError,
torch._inductor.exc.InvalidCxxCompiler,
torch._inductor.exc.CppCompileError,
):
return False
HAS_CPU = LazyVal(test_cpu)
HAS_TRITON = has_triton()
if HAS_TRITON:
import triton
TRITON_HAS_CPU = "cpu" in triton.backends.backends
else:
TRITON_HAS_CPU = False
HAS_CUDA = torch.cuda.is_available() and HAS_TRITON
HAS_XPU = torch.xpu.is_available() and HAS_TRITON
HAS_MPS = torch.mps.is_available()
HAS_GPU = HAS_CUDA or HAS_XPU
GPU_TYPE = get_gpu_type()
HAS_MULTIGPU = any(
getattr(torch, gpu).is_available() and getattr(torch, gpu).device_count() >= 2
for gpu in GPU_TYPES
)
def _check_has_dynamic_shape(
self: TestCase,
code,
):
for_loop_found = False
has_dynamic = False
lines = code.split("\n")
for line in lines:
if "for(" in line:
for_loop_found = True
if re.search(r";.*ks.*;", line) is not None:
has_dynamic = True
break
self.assertTrue(
has_dynamic, msg=f"Failed to find dynamic for loop variable\n{code}"
)
self.assertTrue(for_loop_found, f"Failed to find for loop\n{code}")
def skipDeviceIf(cond, msg, *, device):
if cond:
def decorate_fn(fn):
@functools.wraps(fn)
def inner(self, *args, **kwargs):
if not hasattr(self, "device"):
warn_msg = "Expect the test class to have attribute device but not found. "
if hasattr(self, "device_type"):
warn_msg += "Consider using the skip device decorators in common_device_type.py"
log.warning(warn_msg)
if self.device == device:
raise unittest.SkipTest(msg)
return fn(self, *args, **kwargs)
return inner
else:
def decorate_fn(fn):
return fn
return decorate_fn
def skip_windows_ci(name: str, file: str) -> None:
if IS_WINDOWS and IS_CI:
module = os.path.basename(file).strip(".py")
sys.stderr.write(
f"Windows CI does not have necessary dependencies for {module} tests yet\n"
)
if name == "__main__":
sys.exit(0)
raise unittest.SkipTest("requires sympy/functorch/filelock")
# TODO: Remove HAS_MPS condition when `HAS_GPU` includes HAS_MPS
requires_gpu = functools.partial(unittest.skipIf, not (HAS_GPU or HAS_MPS), "requires gpu")
requires_triton = functools.partial(unittest.skipIf, not HAS_TRITON, "requires triton")
def requires_cuda_with_enough_memory(min_mem_required):
def inner(fn):
if not torch.cuda.is_available() or torch.cuda.get_device_properties().total_memory < min_mem_required:
return unittest.skip(f"Only if the CUDA device has at least {min_mem_required / 1e9:.3f}GB memory to be safe")(fn)
else:
return fn
return inner
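# Editor's note (illustrative usage, not in the original file); the threshold is
# an arbitrary example value:
#
#     @requires_cuda_with_enough_memory(2e10)  # roughly 20 GB of device memory
#     def test_needs_large_gpu(self):
#         ...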
skipCUDAIf = functools.partial(skipDeviceIf, device="cuda")
skipXPUIf = functools.partial(skipDeviceIf, device="xpu")
skipCPUIf = functools.partial(skipDeviceIf, device="cpu")
IS_A100 = LazyVal(
lambda: HAS_CUDA
and get_gpu_shared_memory() == 166912
)
IS_H100 = LazyVal(
lambda: HAS_CUDA
and get_gpu_shared_memory() == 232448
)
IS_BIG_GPU = LazyVal(lambda: HAS_CUDA and is_big_gpu())
def dummy_graph() -> GraphLowering:
"""
Create a graph. This is useful for unit testing code which accesses
V.graph.sizevars.
"""
example_inputs = [torch.randn(10) for _ in range(2)]
gm = make_fx(torch.add, tracing_mode="fake")(*example_inputs)
shape_env = shape_env_from_inputs(example_inputs)
graph = GraphLowering(
gm,
shape_env=shape_env,
)
return graph
def maybe_skip_size_asserts(op):
"""
For certain ops, the meta and eager implementations return different
strides. This causes size/stride asserts to fail. Skip adding those
asserts for now.
"""
if (
op.aten_name
in (
"fft_hfftn",
"fft_hfft",
"fft_hfft2",
"fft_ihfftn",
"fft_fft",
"fft_fft2",
"fft_fftn",
"fft_ifft",
"fft_ifft2",
"fft_ifftn",
"fft_irfft",
"fft_irfft2",
"fft_irfftn",
"fft_ihfft",
"fft_ihfft2",
"fft_rfft",
"fft_rfft2",
"fft_rfftn",
"linalg_eig",
"linalg_eigvals",
)
and "TORCHINDUCTOR_SIZE_ASSERTS" not in os.environ
):
return torch._inductor.config.patch(size_asserts=False)
else:
return contextlib.nullcontext()
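# Editor's note (illustrative usage, not in the original file): the helper
# returns a context manager, e.g.
#
#     with maybe_skip_size_asserts(op):
#         run_compiled_variant(op, sample)  # hypothetical helper, for illustration only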
def clone_preserve_strides_offset(x, device=None):
if not isinstance(x, torch.Tensor):
return x
buffer = torch.as_strided(
x, (x.untyped_storage().size() // x.element_size(),), (1,), 0
)
if not device:
buffer = buffer.clone()
else:
buffer = buffer.to(device, copy=True)
out = torch.as_strided(buffer, x.size(), x.stride(), x.storage_offset())
return out
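# Illustrative sketch (not part of the original file): the clone keeps the original
# size, stride, and storage offset, so layout-sensitive checks remain meaningful.
def _example_clone_preserve_strides_offset():
    x = torch.randn(4, 4).as_strided((2, 2), (1, 4), 2)
    y = clone_preserve_strides_offset(x)
    assert y.stride() == x.stride() and y.storage_offset() == x.storage_offset()
    return y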
```
|
============================================================================================================================================
SOURCE CODE FILE: jit_metaprogramming_utils.py
LINES: 1
SIZE: 33.97 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\jit_metaprogramming_utils.py
ENCODING: utf-8
```py
# mypy: ignore-errors
# Torch
from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401
import torch.nn.functional as F
import torch
import torch.cuda
import torch.jit
import torch.jit._logging
import torch.jit.frontend
from torch.testing._internal.common_nn import module_tests, get_new_module_tests
from torch.testing._internal.common_utils import is_iterable_of_tensors, noncontiguous_like
import collections
from copy import deepcopy
from typing import Any, Union
import math # noqa: F401
# Testing utils
from torch import inf
assert torch.get_default_dtype() == torch.float32
L = 20
M = 10
S = 5
def unpack_variables(args):
if isinstance(args, tuple):
return tuple(unpack_variables(elem) for elem in args)
else:
return args
class dont_convert(tuple):
__slots__ = ()
non_differentiable = collections.namedtuple('non_differentiable', ['tensor'])
def create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None, dtype=torch.float, device=None):
if not isinstance(call_args, tuple):
call_args = (call_args,)
def map_arg(arg):
def maybe_non_contig(tensor):
if not non_contiguous or tensor.numel() < 2:
return tensor.clone()
return noncontiguous_like(tensor)
def conjugate(tensor):
return tensor.conj()
if isinstance(arg, (torch.Size, dont_convert)):
return arg
elif isinstance(arg, tuple) and len(arg) == 0:
var = conjugate(torch.randn((), dtype=dtype, device=device))
var.requires_grad = requires_grad
return var
elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor):
return conjugate(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device))).requires_grad_(requires_grad)
# double check casting
        elif isinstance(arg, non_differentiable):
            return conjugate(maybe_non_contig(arg.tensor.to(device=device)))
elif isinstance(arg, torch.Tensor):
if arg.is_complex() != dtype.is_complex:
raise RuntimeError("User provided tensor is real for a test that runs with complex dtype, ",
"which is not supported for now")
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards
v = conjugate(maybe_non_contig(arg)).detach().to(device=device).clone()
v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex())
return v
elif callable(arg):
return map_arg(arg(dtype=dtype, device=device))
else:
return arg
args_out = tuple(map_arg(arg) for arg in call_args)
kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {}
return args_out, kwargs_out
# NB: JIT script tests cover all nn functional interfaces; script mode does
# not support in-place operations yet, so no in-place operation tests are added.
# All deprecated functions have been removed.
#
# (
# method name,
# input size/constructing fn,
# args (tuple represents shape of a tensor arg),
#   test variant name (will be used as the test name suffix,
#       'inplace' skips grad tests), // optional
# (True, nonfusible_nodes, fusible_nodes) for autodiff // optional
# fn to determine if test should be skipped, // optional
# fn mapping output to part that should be gradcheck'ed, // optional
# kwargs for function, // optional
# )
def get_nn_functional_tests():
nn_functional_tests = [
('conv1d', (S, S, S), ((S, S, S),)),
('conv2d', (S, S, S, S), ((S, S, S, S),)),
('conv3d', (S, S, S, S, S), ((S, S, S, S, S),)),
('conv_transpose1d', (S, S, S), ((S, S, S),)),
('conv_transpose2d', (S, S, S, S), ((S, S, S, S),)),
('conv_transpose3d', (S, S, S, S, S), ((S, S, S, S, S),)),
('conv_tbc', (S, S, S), ((S, S, S), (S,), 2)),
('avg_pool1d', (S, S, S), (3,)),
('avg_pool2d', (S, S, S, S), (3,), '', (True,)),
('avg_pool3d', (S, S, S, S, S), (3,)),
('fractional_max_pool2d', (S, S, S, S), (3, [2, 3],)),
('max_pool1d', (S, S, S), (2, 1)),
('max_pool1d', (S, S, S), (2, 1, 1, 1, False, True), 'with_indices'),
('max_pool2d', (S, S, S, S), (2, 1), '', (True, 'aten::max_pool2d_with_indices')),
('max_pool2d', (S, S, S, S), (2, 1, 1, 1, False, True), 'with_indices', (True, 'aten::max_pool2d_with_indices')),
('max_pool3d', (S, S, S, S, S), (2, 1)),
('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)),
('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)),
('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)),
('lp_pool1d', (S, S, S), (2., 3, 2,)),
('lp_pool2d', (S, S, S, S), (2., 3, 2,)),
('lp_pool3d', (S, S, S, S, S), (2., 3, 2,)),
('adaptive_max_pool1d', (S, S, S), (5,)),
('adaptive_max_pool2d', (S, S, S, S), ([5, 7],)),
('adaptive_max_pool3d', (S, S, S, S, S), ([3, 2, 2],)),
('adaptive_avg_pool1d', (S, S, S), (5,), '', (True,)),
('adaptive_avg_pool2d', (S, S, S, S), ([5, 7],), '', (True,)),
('adaptive_avg_pool3d', (S, S, S, S, S), ([3, 2, 2],), '', (True,)),
('dropout', (S, S, S), (0.5,), '', (True, 'aten::native_dropout')),
('alpha_dropout', (S, S, S), (0.5,)),
('dropout2d', (S, S, S), (0.5,)),
('dropout2d', (S, S, S, S), (0.5,), 'batched'),
('dropout3d', (S, S, S, S), (0.5,)),
('dropout3d', (S, S, S, S, S), (0.5,), 'batched'),
('feature_alpha_dropout', (S, S, S), (0.5,)),
('threshold', (S, S, S), (0.1, 2.), '', (True,)),
('threshold', (S, S, S), (0.1, 2., True), 'inplace'),
('relu', (S, S, S), (), '', (True,)),
('relu', (S, S, S), (), 'inplace'),
('glu', (S - 1, S - 1, S - 1), (),),
('hardtanh', (S, S, S), (-0.5, 0.5), '', (True,)),
('hardtanh', (S, S, S), (-0.5, 0.5, True), 'inplace'),
('relu6', (S, S, S), (), '', (True,)),
('relu6', (S, S, S), (True), 'inplace'),
('elu', (S, S, S), (0.9,),),
('elu', (S, S, S), (0.9, True), 'inplace'),
('selu', (S, S, S), (),),
('selu', (S, S, S), (True), 'inplace'),
('celu', (S, S, S), (0.9,),),
('celu', (S, S, S), (0.9, True), 'inplace'),
('leaky_relu', (S, S, S), (0.02,), '', (True,)),
('leaky_relu', (S, S, S), (0.02,), 'inplace'),
('rrelu', (S, S), (0.1, 0.3, False),),
('rrelu', (S, S), (0.1, 0.3, False, True), 'inplace'),
('hardshrink', (S, S, S), (0.4,), '', (True,)),
('tanhshrink', (S, S, S), (),),
('softsign', (S, S, S), (),),
('softplus', (S, S, S), (), '', (True,)),
('softmin', (S, S, S), (0,),),
('softmax', (S, S, S), (0,), '', (True,)),
('softmax', (S, S, S), (0, 3, torch.double), 'with_all_args', (True,)),
('tanh', (S, S, S), (), '', (True,)),
('sigmoid', (S, S, S), (), '', (True,)),
('silu', (S, S, S), (), '', (True,)),
('log_softmax', (S, S, S), (0,), '', (True,)),
('linear', (S, S), ((M, S),), '', (True, ['aten::linear'])),
('linear', (S, S), ((M, S), (M,)), 'addmm', (True, ['aten::linear'])),
('bilinear', (S, S, S), ((S, S, M), torch.zeros(M, S, M),),),
('embedding', torch.tensor([[1, 2, 4, 5], [4, 3, 2, 5]]), (torch.rand(6, 3), ), '', (True,)),
('embedding_bag', torch.tensor([1, 2, 4, 2]), (torch.rand(5, 3), torch.tensor([0, 4]),),),
('batch_norm', (S, S),
(non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), None, None, True, ),
'training', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (0, S, S, S),
(non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ),
'size_zero', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (0, S, S, S),
(non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ),
'size_zero_inference', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S),
(non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ),
'with_weight_and_bias_training', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
None, non_differentiable(torch.ones(S)), True, ),
'with_only_bias_training', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
non_differentiable(torch.randn(S)), None, True, ),
'with_only_weight_training', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
None, None, False, ),
'inference', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), False, ),
'with_weight_and_bias_inference', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
None, non_differentiable(torch.ones(S)), False, ),
'with_only_bias_inference', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
non_differentiable(torch.randn(S)), None, False, ),
'with_only_weight_inference', (True, 'aten::_batch_norm_impl_index')),
('instance_norm', (S, S, S), (non_differentiable(torch.zeros(S)), non_differentiable(torch.ones(S))),),
('layer_norm', (S, S, S, S), ([5],), '',
(False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),), 'with_only_weight',
(False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
('layer_norm', (S, S, S, S), ([5], None, non_differentiable(torch.rand(S)),), 'with_only_bias',
(False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),
non_differentiable(torch.rand(S))), 'with_weight_and_bias',
(False, ['aten::contiguous', 'aten::_batch_norm_impl_index', 'aten::addcmul'])),
('group_norm', (S, S, S), (1, torch.rand(5),),),
('local_response_norm', (S, S, S), (2, ),),
('nll_loss', F.log_softmax(torch.randn(3, 5), dim=0), (torch.tensor([1, 0, 4]),), '',),
('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2),),),
('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2), True, True), 'full'),
('kl_div', F.log_softmax(torch.randn(S, 10), 1), (F.softmax(torch.randn(S, 10), 1),),),
('cross_entropy', (3, S), (torch.randint(S, (3,), dtype=torch.int64),),),
('binary_cross_entropy_with_logits', (3,), (torch.empty(3).random_(2), ),),
('smooth_l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('huber_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('mse_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('smooth_l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
('huber_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
('l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
('mse_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
('margin_ranking_loss', (S,), ((S,), (S,)),),
('hinge_embedding_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('multilabel_soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('cosine_embedding_loss', (S, S), ((S, S), non_differentiable(torch.rand(S,))),),
('pixel_shuffle', (1, 9, 4, 4), (3,),),
('pixel_unshuffle', (1, 1, 12, 12), (3,),),
('affine_grid', (S, 2, 3), (torch.Size([S, 1, 7, 7]),),),
('pad', (3, 3, 4, 2), ([1, 1],),),
('pairwise_distance', (S, S), ((S, S),),),
('pdist', (S, S), (),),
('cosine_similarity', (S, S), ((S, S),),),
('triplet_margin_loss', (S, S), ((S, S), (S, S)),),
('normalize', (S, S, S), (),),
('unfold', (S, S, S, S), ([2, 3]),),
('fold', (1, 3 * 2 * 2, 12), ([4, 5], [2, 2]),),
('grid_sample', (S, S, S, S), (non_differentiable(torch.rand(S, S, S, 2)),),),
('gumbel_softmax', (S, S), (2.,), '', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])),
('gumbel_softmax', (S, S), (2., True,), 'hard', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])),
('multilabel_margin_loss', torch.tensor([[0.2, -0.2, 0.07]]), (torch.tensor([[0, 0, 1]]),),),
('multi_margin_loss', (S, S), (non_differentiable(torch.randint(S, (S, ), dtype=torch.int64)),
1, 1., non_differentiable(torch.randn(S))),),
('binary_cross_entropy', torch.randn(3, 2).sigmoid(), (non_differentiable(torch.rand(3, 2)),
non_differentiable(torch.randn(3, 2))),),
('binary_cross_entropy', torch.randn(3, 2).sigmoid(),
(non_differentiable(torch.rand(3, 2)),
non_differentiable(torch.randn(3, 2)), None, None, 'mean'), 'size_average'),
('ctc_loss', torch.rand(S, S, S).log_softmax(2).detach().requires_grad_(),
(torch.randint(1, S, (S, S), dtype=torch.long), torch.full((S,), S, dtype=torch.long),
torch.randint(1, S, (S,), dtype=torch.long))),
('upsample', torch.randn(S, S, M, M), (None, 2.), 'with_scale'),
('upsample', torch.randn(S, S, M, M), (4,), 'with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'nearest_4d'),
('interpolate', torch.randn(S, S, M, M), (None, 2.), 'nearest_4d_with_scale'),
('interpolate', torch.randn(S, S, M, M), (4,), 'nearest_4d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'area_4d'),
('interpolate', torch.randn(S, S, M, M), (None, 2.), 'area_4d_with_scale'),
('interpolate', torch.randn(S, S, M, M), (4,), 'area_4d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bilinear_4d'),
('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bilinear_4d_with_scale'),
('interpolate', torch.randn(S, S, M, M), (4,), 'bilinear_4d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bicubic_4d'),
('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bicubic_4d_with_scale'),
('interpolate', torch.randn(S, S, M, M), (4,), 'bicubic_4d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'nearest_3d'),
('interpolate', torch.randn(S, M, M), (None, 2.), 'nearest_3d_with_scale'),
('interpolate', torch.randn(S, M, M), (4,), 'nearest_3d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'area_3d'),
('interpolate', torch.randn(S, M, M), (None, 2.), 'area_3d_with_scale'),
('interpolate', torch.randn(S, M, M), (4,), 'area_3d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'linear_3d'),
('interpolate', torch.randn(S, M, M), (None, 2.), 'linear_3d_with_scale'),
('interpolate', torch.randn(S, M, M), (4,), 'linear_3d_with_size'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'nearest_5d_with_scale'),
('interpolate', torch.randn(S, M, M, M, M), (4,), 'nearest_5d_with_size'),
('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'area_5d'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'area_5d_with_scale'),
('interpolate', torch.randn(S, M, M, M, M), (4,), 'area_5d_with_size'),
('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'trilinear_5d'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'trilinear_5d_with_scale'),
('interpolate', torch.randn(S, M, M, M, M), (4,), 'trilinear_5d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2, None, 'nearest', None, False),
'nearest_4d_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (4, None, 'nearest', None, False),
'nearest_4d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (None, 2., 'bilinear', None, False),
'bilinear_4d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (4, None, 'bilinear', None, False),
'bilinear_4d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (None, 2., 'bicubic', None, False),
'bicubic_4d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (4, None, 'bicubic', None, False),
'bicubic_4d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M), (None, 2., 'nearest', None, False),
'nearest_3d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M), (4, None, 'nearest', None, False),
'nearest_3d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M), (None, 2., 'linear', None, False),
'linear_3d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M), (4, None, 'linear', None, False),
'linear_3d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'nearest', None, False),
'nearest_5d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M, M, M), (4, None, 'nearest', None, False),
'nearest_5d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'trilinear', None, False),
'trilinear_5d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M, M, M), (4, None, 'trilinear', None, False),
'trilinear_5d_with_size_not_recompute_scale_factor'),
]
return nn_functional_tests
script_template = '''
def the_method({}):
return {}
'''
def value_to_literal(value):
if isinstance(value, str):
# Quotes string and escapes special characters
return ascii(value)
if isinstance(value, torch.Tensor):
return 'torch.' + str(value)
else:
return str(value)
def get_call(method_name, func_type, args, kwargs):
kwargs_str = ', '.join([k + '=' + value_to_literal(v) for k, v in kwargs.items()])
self_arg = args[0]
if func_type == 'method':
args = args[1:]
argument_str = ', '.join(args)
argument_str += ', ' if len(args) and len(kwargs) else ''
argument_str += kwargs_str
if func_type == 'functional' or func_type == 'function':
call = f'torch.{method_name}({argument_str})'
elif func_type == 'method':
call = f'{self_arg}.{method_name}({argument_str})'
elif func_type == 'nn_functional':
call = f'torch.nn.functional.{method_name}({argument_str})'
else:
raise TypeError('Unsupported function type')
return call
def get_constant(x):
if x == inf:
return 'math.inf'
if x == -inf:
return '-math.inf'
return x
def get_script_args(args):
formals: list[str] = []
tensors: list[Union[torch.Tensor, list[torch.Tensor]]] = []
actuals: list[str] = []
for arg in args:
if isinstance(arg, torch.Tensor):
name = f'i{len(formals)}'
formals.append(name)
actuals.append(name)
tensors.append(arg)
elif is_iterable_of_tensors(arg):
name = f'i{len(formals)}'
formals.append(name + ': List[torch.Tensor]')
actuals.append(name)
tensors.append(list(arg))
elif isinstance(arg, str):
actuals.append(f"'{arg}'")
else:
actuals.append(str(get_constant(arg)))
return (formals, tensors, actuals)
# creates a script function from (method_name, func_type, *args, **kwargs),
# and returns the compiled function and example inputs
def gen_script_fn_and_args(method_name, func_type, *args, **kwargs):
formals, tensors, actuals = get_script_args(args)
call = get_call(method_name, func_type, actuals, kwargs)
script = script_template.format(', '.join(formals), call)
CU = torch.jit.CompilationUnit(script)
return CU.the_method, tensors
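# Illustrative sketch (not part of the original file): compile a single call to
# torch.nn.functional.relu through the script template and run it on the
# generated example inputs.
def _example_gen_script_fn_and_args():
    fn, tensors = gen_script_fn_and_args('relu', 'nn_functional', torch.randn(3))
    return fn(*tensors)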
# creates a script function from (method_name, func_type);
# returns a function that takes in (args, kwargs) and runs the compiled function
def create_script_fn(self, method_name, func_type):
# function returns tuple containing original output and
# filtered output to be used in checking gradients
def script_fn(*args, **kwargs):
fn, tensors = gen_script_fn_and_args(method_name, func_type, *args, **kwargs)
self.assertExportImport(fn.graph, tensors)
output = fn(*tensors)
# skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
script_fn.last_graph = fn.graph_for(*tensors) # type: ignore[attr-defined]
return output
return script_fn
class SplitInputs:
all_tensors: list[Any]
tensor_args: list[Any]
nontensor_args: list[Any]
arg_types: list[str]
tensor_kwargs: dict[str, Any]
kwarg_order: list[str]
nontensor_kwargs: dict[str, Any]
kwarg_types: dict[str, Any]
@staticmethod
def _is_tensor_input(arg):
return isinstance(arg, torch.Tensor) or is_iterable_of_tensors(arg)
def __init__(self, args, kwargs):
self.arg_types = ['t' if self._is_tensor_input(arg) else 's' for arg in args]
self.kwarg_types = {k: 't' if self._is_tensor_input(v) else 's' for k, v in kwargs.items()}
self.tensor_args = [arg for arg in args if self._is_tensor_input(arg)]
self.nontensor_args = [arg for arg in args if not self._is_tensor_input(arg)]
self.tensor_kwargs = {k: v for k, v in kwargs.items() if self._is_tensor_input(v)}
self.nontensor_kwargs = {k: v for k, v in kwargs.items() if not self._is_tensor_input(v)}
self.all_tensors = [*self.tensor_args, *[v for k, v in self.tensor_kwargs.items()]]
self.kwarg_order = [k for k, v in kwargs.items()]
def nontensors_match(self, other: 'SplitInputs'):
if self.arg_types != other.arg_types:
return False
if self.kwarg_types != other.kwarg_types:
return False
if self.kwarg_order != other.kwarg_order:
return False
if self.nontensor_args != other.nontensor_args:
return False
if self.nontensor_kwargs != other.nontensor_kwargs:
return False
return True
# make a new function where all non-tensor arguments in 'args' have been partially
# applied, and all tensor arguments remain.
# used to trace functions when some arguments are not tensors
def partial_apply_nontensors(fn, args, kwargs):
inputs = SplitInputs(args, kwargs)
def new_fn(*tensors_):
tensors = iter(tensors_)
full_args = [args[i] if s == 's' else next(tensors) for i, s in enumerate(inputs.arg_types)]
full_kwargs = {k: kwargs[k] if s == 's' else next(tensors) for k, s in inputs.kwarg_types.items()}
return fn(*full_args, **full_kwargs)
return new_fn, inputs
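# Illustrative sketch (not part of the original file): only the tensor arguments are
# passed at call time; the non-tensor `dim=1` is captured from the original args.
def _example_partial_apply_nontensors():
    def f(x, dim, y):
        return torch.cat([x, y], dim=dim)
    x, y = torch.randn(2, 3), torch.randn(2, 3)
    new_fn, split = partial_apply_nontensors(f, (x, 1, y), {})
    assert all(a is b for a, b in zip(split.all_tensors, [x, y]))
    return new_fn(*split.all_tensors)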
# create a trace function from input fn
def create_traced_fn(self, fn, cache_traced_fn=False):
def traced_fn(*inputs, **kwargs):
# `check_trace` is set to False because check_trace is run with @no_grad
# Also, `check_against_reference` already does all the checks
# against python function
fn_tensors, split_inputs = partial_apply_nontensors(fn, inputs, kwargs)
if not cache_traced_fn or not hasattr(traced_fn, 'traced'):
traced = torch.jit.trace(fn_tensors, split_inputs.all_tensors, check_trace=False)
self.assertExportImport(traced.graph, split_inputs.all_tensors)
output = traced(*split_inputs.all_tensors)
if cache_traced_fn:
traced_fn.traced = traced
traced_fn.split_inputs = split_inputs
else:
# Guard to check that nontensor inputs are the same as during tracing
self.assertTrue(traced_fn.split_inputs.nontensors_match(split_inputs))
output = traced_fn.traced(*split_inputs.all_tensors)
traced = traced_fn.traced
# skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
traced_fn.last_graph = traced.graph_for(*split_inputs.all_tensors) # type: ignore[attr-defined]
traced_fn.graph = traced.graph # type: ignore[attr-defined]
return output
return traced_fn
# known to be failing in script
EXCLUDE_SCRIPT = {
'test_norm_fro_default',
'test_norm_fro_cpu',
'test_norm_nuc',
'test_norm_fro',
'test_norm_nuc_batched',
# aten op has additional cudnn argument
'test_nn_unfold',
# flaky test - TODO fix
'test_nn_ctc_loss',
# unknown builtin op
'test_nn_fold',
# jit doesn't support sparse tensors.
'test_to_sparse',
'test_to_sparse_dim',
}
# generates a script function and set of example inputs
# from a specified test in the format of nn_functional_tests
def get_nn_functional_compiled_fn_and_inputs(name, self_size, args, variant_name='', *extra_args):
test_name = 'test_nn_' + name
if variant_name != '':
test_name = test_name + '_' + variant_name
self_variable = create_input((self_size,))[0][0]
# need to record this because methods can change the size (e.g. unsqueeze)
args_variable, _kwargs_variable = create_input(args)
self_tensor = deepcopy(self_variable.data)
args_tensor = deepcopy(unpack_variables(args_variable))
f_args_variable = (self_variable,) + args_variable
f_args_tensor = (self_tensor,) + args_tensor # noqa: F841
with torch._jit_internal._disable_emit_hooks():
script_fn, inputs = gen_script_fn_and_args(name, "nn_functional", *f_args_variable)
return script_fn, inputs
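# Illustrative sketch (not part of the original file): compile one entry in the
# nn_functional_tests format ('relu' applied to an (S, S, S) input) and evaluate
# the resulting scripted function.
def _example_get_nn_functional_compiled_fn():
    script_fn, inputs = get_nn_functional_compiled_fn_and_inputs('relu', (S, S, S), ())
    return script_fn(*inputs)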
EXCLUDE_SCRIPT_MODULES = {
'test_nn_AdaptiveAvgPool2d_tuple_none',
'test_nn_AdaptiveAvgPool3d_tuple_none',
'test_nn_AdaptiveMaxPool2d_tuple_none',
'test_nn_AdaptiveMaxPool3d_tuple_none',
# Doesn't use future division, so this is not supported
'test_nn_CrossMapLRN2d',
# Derivative for aten::_scaled_dot_product_flash_attention_backward is not implemented
'test_nn_TransformerDecoderLayer_gelu_activation',
'test_nn_TransformerDecoderLayer_relu_activation',
'test_nn_TransformerEncoderLayer_gelu_activation',
'test_nn_TransformerEncoderLayer_relu_activation',
'test_nn_Transformer_multilayer_coder',
}
script_method_template = '''
def forward({}):
return {}
'''
def create_script_module(self, nn_module, constructor_args, *args, **kwargs):
def script_module(*args, **kwargs):
_formals, tensors, actuals = get_script_args(args)
method_args = ', '.join(['self'] + actuals)
call_args_str = ', '.join(actuals)
call = f"self.submodule({call_args_str})"
script = script_method_template.format(method_args, call)
submodule_constants = []
if kwargs.get('is_constant'):
submodule_constants = ['submodule']
# Create module to use the script method
class TheModule(torch.jit.ScriptModule):
__constants__ = submodule_constants
def __init__(self) -> None:
super().__init__()
self.submodule = nn_module(*constructor_args)
def make_module(script):
module = TheModule()
# check __repr__
str(module)
module.define(script)
return module
module = make_module(script)
if self:
self.assertExportImportModule(module, tensors)
module(*args)
# skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
create_script_module.last_graph = module.graph # type: ignore[attr-defined]
return module
return script_module
def check_alias_annotation(method_name, args, kwargs, *, aten_name, func_type='method'):
formals, tensors, actuals = get_script_args(args)
call = get_call(method_name, func_type, actuals, kwargs)
script = script_template.format(', '.join(formals), call)
CU = torch.jit.CompilationUnit(script)
# to clean up IR
torch._C._jit_pass_inline(CU.the_method.graph)
torch._C._jit_pass_constant_propagation(CU.the_method.graph)
torch._C._jit_check_alias_annotation(CU.the_method.graph, tuple(tensors), aten_name)
def get_nn_module_name_from_kwargs(**kwargs):
if 'module_name' in kwargs:
return kwargs['module_name']
elif 'fullname' in kwargs:
return kwargs['fullname']
elif 'constructor' in kwargs:
return kwargs['constructor'].__name__
def get_nn_mod_test_name(**kwargs):
if 'fullname' in kwargs:
test_name = kwargs['fullname']
else:
test_name = get_nn_module_name_from_kwargs(**kwargs)
if 'desc' in kwargs:
test_name = f"{test_name}_{kwargs['desc']}"
return f'test_nn_{test_name}'
def get_nn_module_class_from_kwargs(**kwargs):
name = get_nn_module_name_from_kwargs(**kwargs)
index = name.find("_")
if index == -1:
return name
else:
return name[0:name.find("_")]
def try_get_nn_module_compiled_mod_and_inputs(*args, **kwargs):
name = get_nn_module_name_from_kwargs(**kwargs)
if 'desc' in kwargs and 'eval' in kwargs['desc']:
# eval() is not supported, so skip these tests
return
test_name = name
if 'desc' in kwargs:
test_name = f"{test_name}_{kwargs['desc']}"
test_name = get_nn_mod_test_name(**kwargs)
if test_name in EXCLUDE_SCRIPT_MODULES:
return
if 'constructor' in kwargs:
nn_module = kwargs['constructor']
else:
nn_module = getattr(torch.nn, name)
if "FunctionalModule" in str(nn_module):
return
if 'constructor_args_fn' in kwargs:
constructor_args = kwargs['constructor_args_fn']()
else:
constructor_args = kwargs.get('constructor_args', ())
# Set up inputs from tuple of sizes or constructor fn
input_dtype = torch.double
if 'input_fn' in kwargs:
input = kwargs['input_fn']()
if isinstance(input, torch.Tensor):
input = (input,)
if all(tensor.is_complex() for tensor in input):
input_dtype = torch.cdouble
else:
input = (kwargs['input_size'],)
# Extra parameters to forward()
if 'extra_args' in kwargs:
input = input + kwargs['extra_args']
if 'target_size' in kwargs:
input = input + (kwargs['target_size'],)
elif 'target_fn' in kwargs:
if torch.is_tensor(input):
input = (input,)
input = input + (kwargs['target_fn'](),)
args_variable, _kwargs_variable = create_input(input, dtype=input_dtype)
f_args_variable = deepcopy(unpack_variables(args_variable))
out_var = deepcopy(f_args_variable)
_args, mod = f_args_variable, create_script_module(
None, nn_module, constructor_args, *f_args_variable
)(*f_args_variable)
return mod, out_var
def get_all_nn_module_tests():
# additional modules test
# TODO: delete this list once we make all nn_tests work
additional_module_tests = [
{
'module_name': 'Bilinear',
'constructor_args': (S, S, M),
'input_size': (S, S),
'extra_args': ((S, S),)
},
{
'module_name': 'RNNCell',
'constructor_args': (S, S),
'input_size': (S, S),
},
{
'module_name': 'LSTMCell',
'constructor_args': (S, S),
'input_size': (S, S),
},
{
'module_name': 'GRUCell',
'constructor_args': (S, S),
'input_size': (S, S),
},
{
'module_name': 'MultiheadAttention',
'constructor_args': (128, 8),
'input_size': (10, 8, 128),
'extra_args': (torch.randn(10, 8, 128), torch.randn(10, 8, 128)),
'slowTest': True
},
{
'module_name': 'Transformer',
'constructor_args': (1, 1, 1, 1, 2),
'input_size': (3, 1, 1),
'extra_args': (torch.randn(1, 1, 1),),
'slowTest': True
}
]
return module_tests + get_new_module_tests() + additional_module_tests
```
|
============================================================================================================================
SOURCE CODE FILE: jit_utils.py
LINES: 2
SIZE: 34.03 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\jit_utils.py
ENCODING: utf-8
```py
# mypy: ignore-errors
# Torch
from torch.autograd import Variable
from torch.autograd.function import _nested_map
from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401
from torch.onnx import OperatorExportTypes
import torch
import torch.cuda
import torch.jit
import torch.jit._logging
import torch.jit.frontend
import torch.jit.quantized
import zipfile
import functools
# Testing utils
from torch.testing import FileCheck
from torch.testing._internal.common_utils import IS_WINDOWS, \
freeze_rng_state, enable_profiling_mode_for_profiling_tests, ProfilingMode, TEST_BAILOUTS, \
is_iterable_of_tensors
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401
# Standard library
from contextlib import contextmanager
from functools import reduce
from io import StringIO
from collections import defaultdict
import importlib.util
import inspect
import io
import math
import os
import pickle
import sys
import tempfile
import textwrap
from importlib.abc import Loader
from typing import Any, Union
RUN_CUDA = torch.cuda.is_available()
RUN_CUDA_MULTI_GPU = RUN_CUDA and torch.cuda.device_count() > 1
RUN_CUDA_HALF = RUN_CUDA
# HIP supports half, no version check necessary
if torch.cuda.is_available() and not torch.version.hip:
CUDA_VERSION = torch._C._cuda_getCompiledVersion()
for d in range(torch.cuda.device_count()):
major = torch.cuda.get_device_capability(d)[0]
if (major < 6):
RUN_CUDA_HALF = False
def execWrapper(code, glob, loc):
exec(code, glob, loc)
def do_input_map(fn, input):
return _nested_map(lambda t: isinstance(t, torch.Tensor), fn)(input)
def clear_class_registry():
torch._C._jit_clear_class_registry()
torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore()
torch.jit._state._clear_class_state()
def get_execution_plan(graph_executor_state):
execution_plans = list(graph_executor_state.execution_plans.values())
num_plans = len(execution_plans)
if num_plans != 1:
raise RuntimeError('This test assumes this GraphExecutor should '
f'only have one execution plan, got: {num_plans}')
return execution_plans[0]
class _AssertRaisesRegexWithHighlightContext:
"""
A context manager that is useful for checking that error messages highlight
the correct part of the source code.
"""
def __init__(self, test_case, exception, regex, highlight):
self.test_case = test_case
self.exception_type = exception
self.regex = regex
self.highlight = highlight
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
with self.test_case.assertRaisesRegex(self.exception_type, self.regex):
if type:
raise value
if self.highlight:
FileCheck().check_source_highlighted(self.highlight).run(str(value))
return True
FUSION_GROUP = "prim::TensorExprGroup"
class JitTestCase(JitCommonTestCase):
_do_cuda_memory_leak_check = True
_restored_warnings = False
class capture_stdout(list):
"""
Replace sys.stdout with a temporary StringIO
"""
def __enter__(self):
self.sys_stdout = sys.stdout
self.stringio = StringIO()
sys.stdout = self.stringio
return self
def __exit__(self, *args):
self.append(str(self.stringio.getvalue()))
del self.stringio
sys.stdout = self.sys_stdout
class capture_stderr(list):
"""
Replace sys.stderr with a temporary StringIO
"""
def __enter__(self):
self.sys_stderr = sys.stderr
self.stringio = StringIO()
sys.stderr = self.stringio
return self
def __exit__(self, *args):
self.append(str(self.stringio.getvalue()))
del self.stringio
sys.stderr = self.sys_stderr
def setHooks(self):
torch._C._jit_set_emit_hooks(self.emitModuleHook, self.emitFunctionHook)
def clearHooks(self):
torch._C._jit_set_emit_hooks(None, None)
def setUp(self):
super().setUp()
# unittest overrides all warning filters and forces all of them to show up
# after we install our own to silence those coming from inside PyTorch.
# This will ensure that our filter still takes precedence.
if not JitTestCase._restored_warnings:
torch.jit.TracerWarning.ignore_lib_warnings()
JitTestCase._restored_warnings = True
self.setHooks()
def tearDown(self):
super().tearDown()
# needs to be cleared because python might be unloaded before
# the callback gets destructed
self.clearHooks()
clear_class_registry()
def assertAllFused(self, graph, except_for=()):
# note this helper collects nodes on 'fast path' only
# i.e. the true blocks of specialized checks
def get_nodes_and_parents_recursively(block, kind, acc):
for node in block.nodes():
if node.kind() == kind:
acc[block].append(node)
elif node.kind() == 'prim::DifferentiableGraph':
get_nodes_and_parents_recursively(node.g('Subgraph'), kind, acc)
elif node.kind() == 'prim::If' and (node.inputs().__next__().node().kind() == 'aten::all' or
node.inputs().__next__().node().kind() == 'prim::TypeCheck' or
node.inputs().__next__().node().kind() == 'prim::RequiresGradCheck'):
get_nodes_and_parents_recursively(node.blocks().__next__(), kind, acc)
else:
for inner_block in node.blocks():
get_nodes_and_parents_recursively(inner_block, kind, acc)
allowed_nodes = {'prim::Constant', FUSION_GROUP, 'prim::BailoutTemplate',
'prim::TupleConstruct', 'prim::If', 'prim::TypeCheck', 'prim::RequiresGradCheck'} | set(except_for)
fusion_groups : dict[torch._C.Block, list[torch._C.Node]] = defaultdict(list)
get_nodes_and_parents_recursively(graph, FUSION_GROUP, fusion_groups)
self.assertTrue(len(fusion_groups) == 1, f'got {graph}')
(graph, fusion_nodes) = next(iter(fusion_groups.items()))
# the block contains one FUSION_GROUP and the rest of nodes are `allowed_nodes`
self.assertTrue(len(fusion_nodes) == 1, f'got {graph}')
self.assertTrue(all(node.kind() in allowed_nodes for node in graph.nodes()),
f'got {graph}')
def _isHookExceptionOk(self, e):
se = str(e)
allowed = ("Could not export Python function",
"closures are not exportable")
for a in allowed:
if a in se:
return True
return False
def _compared_saved_loaded(self, m):
def extract_files(buffer):
# crack open the zip format to get at the main module code
archive = zipfile.ZipFile(buffer)
# check that we have no duplicate names
self.assertEqual(len(set(archive.namelist())), len(archive.namelist()))
files = list(filter(lambda x: x.startswith('archive/code/'), archive.namelist()))
# unwrap all the code files into strings
code_files_str = filter(lambda x: x.endswith('.py'), files)
code_files_stream = (archive.open(f) for f in code_files_str)
code_files = ("".join([line.decode() for line in file]) for file in code_files_stream)
            # unpickle all the debug files
debug_files_str = filter(lambda f: f.endswith('.debug_pkl'), files)
debug_files_stream = (archive.open(f) for f in debug_files_str)
debug_files = (pickle.load(f) for f in debug_files_stream)
return code_files, debug_files
# disable the hook while we parse code, otherwise we will re-enter the hook
with torch._jit_internal._disable_emit_hooks():
try:
# short-circuit if this is an empty function or module
if len(m.code) == 0:
return
if isinstance(m, torch._C.ScriptModule):
if len(m._method_names()) == 0:
return
# save the module to a buffer
buffer = io.BytesIO()
torch.jit.save(m, buffer)
# copy the data in the buffer so we can restore it later. This
# is because py2 and py3 have different semantics with zipfile
# and it's easier to just work with a fresh copy each time.
buffer_copy = buffer.getvalue()
code_files, _debug_files = extract_files(buffer)
except RuntimeError as e:
if not self._isHookExceptionOk(e):
raise
else:
return
        # import the model again (from the copy we made of the original)
buffer2 = io.BytesIO(buffer_copy)
imported = torch.jit.load(buffer2)
# save it again
saved_module_buffer_2 = io.BytesIO()
torch.jit.save(imported, saved_module_buffer_2)
saved_module_buffer_2.seek(0)
code_files_2, _debug_files_2 = extract_files(saved_module_buffer_2)
for a, b in zip(code_files, code_files_2):
self.assertMultiLineEqual(a, b)
if isinstance(m, torch._C.ScriptModule):
self.assertTrue(torch._C._ivalue_tags_match(m, imported._c))
def emitFunctionHook(self, func):
# func has invalid names for export, skip the jitter check
if func.name == "<lambda>" or "aten::" in func.name:
return
self._compared_saved_loaded(func)
def emitModuleHook(self, module):
self._compared_saved_loaded(module)
def getExportImportCopyWithPacking(self, m, also_test_file=True, map_location=None):
buffer = io.BytesIO()
m.apply(lambda s: s._pack() if s._c._has_method('_pack') else None)
torch.jit.save(m, buffer)
m.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None)
buffer.seek(0)
imported = torch.jit.load(buffer, map_location=map_location)
imported.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None)
if not also_test_file:
return imported
# Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
# opens the file, and it cannot be opened multiple times in Windows. To support Windows,
# close the file after creation and try to remove it manually
f = tempfile.NamedTemporaryFile(delete=False)
try:
f.close()
imported.save(f.name)
result = torch.jit.load(f.name, map_location=map_location)
finally:
os.unlink(f.name)
result.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None)
return result
def assertGraphContains(self, graph, kind, consider_subgraphs=False):
if consider_subgraphs:
strgraph = str(graph)
count = strgraph.count(kind) - strgraph.count(f'with {kind}')
self.assertTrue(count > 0)
return
def nodes(block):
out = []
for node in block.nodes():
if node.kind() == kind:
out.append(node)
for block in node.blocks():
out += nodes(block)
return out
out_nodes = nodes(graph)
self.assertTrue(len(out_nodes) > 0)
def assertGraphContainsExactly(self, graph, kind, num_kind_nodes, consider_subgraphs=False):
def perform_assert(graph, kind, actual, expected, consider_subgraphs):
if actual == expected:
return
subgraph = 'including' if consider_subgraphs else 'excluding'
raise AssertionError(
f'{graph}\nError: graph contains {actual} {kind} nodes ({subgraph} subgraphs) but expected {expected}')
if consider_subgraphs:
strgraph = str(graph)
count = strgraph.count(kind) - strgraph.count(f'with {kind}')
perform_assert(graph, kind, count, num_kind_nodes,
consider_subgraphs)
return
def nodes(block):
out = []
for node in block.nodes():
if node.kind() == kind:
out.append(node)
for block in node.blocks():
out += nodes(block)
return out
out_nodes = nodes(graph)
perform_assert(graph, kind, len(out_nodes), num_kind_nodes,
consider_subgraphs)
def assertExpectedONNXGraph(self, g, *args, **kwargs):
g = torch.onnx._optimize_trace(g, operator_export_type=OperatorExportTypes.ONNX)
self.assertExpectedGraph(g, *args, **kwargs)
def assertExpectedGraph(self, trace, *args, **kwargs):
if isinstance(trace, torch._C.Graph):
graph = trace
else:
graph = trace.graph()
torch._C._jit_pass_lint(graph)
torch._C._jit_pass_dce(graph)
torch._C._jit_pass_lint(graph)
graph = torch._C._jit_pass_canonicalize(graph)
torch._C._jit_pass_lint(graph)
self.assertExpected(str(graph), *args, **kwargs)
def run_pass(self, name, trace):
if isinstance(trace, torch._C.Graph):
graph = trace
set_graph = False
else:
set_graph = True
graph = trace.graph()
torch._C._jit_pass_lint(graph)
result = getattr(torch._C, '_jit_pass_' + name)(graph)
if result is not None and not isinstance(result, bool):
graph = result
torch._C._jit_pass_lint(graph)
if set_graph:
trace.set_graph(graph)
return graph
def get_frame_vars(self, frames_up):
frame = inspect.currentframe()
if not frame:
raise RuntimeError("failed to inspect frame")
i = 0
while i < frames_up + 1:
frame = frame.f_back
if not frame:
raise RuntimeError("failed to get frame")
i += 1
defined_vars: dict[str, Any] = {}
defined_vars.update(frame.f_locals)
defined_vars.update(frame.f_globals)
return defined_vars
def assertRaisesRegexWithHighlight(self, exception, regex, highlight):
return _AssertRaisesRegexWithHighlightContext(self, exception, regex, highlight)
def checkScriptRaisesRegex(self, script, inputs, exception, regex,
name=None, outputs=None, capture_output=False,
frames_up=1, profiling=ProfilingMode.PROFILING):
"""
Checks that a given function will throw the correct exception,
when executed with normal python, the string frontend, and the
AST frontend. Logic taken from `checkScript` (see comments there
for details)
"""
with enable_profiling_mode_for_profiling_tests():
# Normal Python
with self.assertRaisesRegex(exception, regex):
if isinstance(script, str):
frame = self.get_frame_vars(frames_up)
the_locals: dict[str, Any] = {}
execWrapper(script, glob=frame, loc=the_locals)
frame.update(the_locals)
python_fn = frame[name]
else:
python_fn = script
python_fn(*inputs)
# String frontend
with self.assertRaisesRegex(exception, regex):
if isinstance(script, str):
cu = torch.jit.CompilationUnit(script, _frames_up=frames_up)
string_frontend = getattr(cu, name)
else:
source = textwrap.dedent(inspect.getsource(script))
cu = torch.jit.CompilationUnit(source, _frames_up=frames_up)
string_frontend = getattr(cu, script.__name__)
string_frontend(*inputs)
# Python AST frontend
if not isinstance(script, str):
with self.assertRaisesRegex(exception, regex):
ge = torch.jit.script(python_fn)
ge(*inputs)
def checkBailouts(self, model, inputs, expected):
state = model.get_debug_state()
plan = get_execution_plan(state)
num_bailouts = plan.code.num_bailouts()
for i in range(0, num_bailouts):
plan.code.request_bailout(i)
bailout_outputs = model(*inputs)
self.assertEqual(bailout_outputs, expected)
def checkScript(self,
script,
inputs,
name='func',
optimize=True,
inputs_requires_grad=False,
capture_output=False,
frames_up=1,
profiling=ProfilingMode.PROFILING,
atol=None,
rtol=None):
"""
Checks that a given script generates the same output as the Python
version using the given inputs.
"""
with torch.jit.optimized_execution(optimize):
with enable_profiling_mode_for_profiling_tests():
extra_profile_runs = any(isinstance(x, torch.Tensor) and x.requires_grad for x in inputs)
if isinstance(script, str):
# Compile the string to a Script function
# with enable_profiling_mode():
cu = torch.jit.CompilationUnit(script, _frames_up=frames_up)
# Execute the Python function so we can run it later and get its
# outputs
frame = self.get_frame_vars(frames_up)
the_locals: dict[str, Any] = {}
execWrapper(script, glob=frame, loc=the_locals)
frame.update(the_locals)
python_fn = frame[name]
scripted_fn = getattr(cu, name)
else:
# Check the string frontend first
source = textwrap.dedent(inspect.getsource(script))
self.checkScript(
source,
inputs,
script.__name__,
optimize=optimize,
inputs_requires_grad=inputs_requires_grad,
capture_output=capture_output,
profiling=profiling,
frames_up=2)
# Continue checking the Python frontend
scripted_fn = torch.jit.script(script, _frames_up=1)
python_fn = script
if inputs_requires_grad:
recording_inputs = do_input_map(lambda t: t.detach().requires_grad_(), inputs)
else:
recording_inputs = inputs
if capture_output:
with self.capture_stdout() as script_stdout:
script_outputs = scripted_fn(*recording_inputs)
with self.capture_stdout():
opt_script_outputs = scripted_fn(*recording_inputs)
with self.capture_stdout():
python_outputs = python_fn(*inputs)
if not IS_WINDOWS:
self.assertExpected(script_stdout[0], subname='stdout')
self.assertEqual(python_outputs, opt_script_outputs, atol=atol, rtol=rtol)
else:
# profiling run
script_outputs = scripted_fn(*recording_inputs)
if inputs_requires_grad or extra_profile_runs:
opt_script_outputs = scripted_fn(*recording_inputs)
# optimized run
opt_script_outputs = scripted_fn(*recording_inputs)
if TEST_BAILOUTS:
self.checkBailouts(scripted_fn, inputs, opt_script_outputs)
python_outputs = python_fn(*inputs)
self.assertEqual(python_outputs, script_outputs, atol=atol, rtol=rtol)
self.assertEqual(script_outputs, opt_script_outputs, atol=atol, rtol=rtol)
return scripted_fn
def checkTrace(self, func, reference_tensors, input_tensors=None,
drop=None, allow_unused=False, verbose=False,
inputs_require_grads=True, check_tolerance=1e-5, export_import=True,
_force_outplace=False, grad_atol=None, grad_rtol=None):
# TODO: check gradients for parameters, not just inputs
def allSum(vs):
# drop allows us to remove some values from ever being used
# to test unused outputs
if drop is not None:
vs = vs[:-drop]
# we don't want all the grad for all the outputs to be the same
# so we multiply each by a constant
return sum(math.log(i + 2) * v.sum() for i, v in enumerate(vs) if v is not None)
if input_tensors is None:
input_tensors = reference_tensors
def flatten_inputs(inputs):
def input_reduce(input, fn, acc):
if isinstance(input, torch.Tensor):
fn(input, acc)
elif isinstance(input, dict):
reduce(lambda acc, key: input_reduce(input[key], fn, acc), input, acc)
else:
reduce(lambda acc, val: input_reduce(val, fn, acc), input, acc)
return acc
return tuple(input_reduce(recording_inputs, lambda t, acc: acc.append(t), []))
nograd_inputs = reference_tensors
if inputs_require_grads:
recording_inputs = do_input_map(lambda t: t.clone().requires_grad_(), reference_tensors)
flattened_recording_inputs = flatten_inputs(recording_inputs)
else:
recording_inputs = reference_tensors
# `check_trace` is set to False because check_trace is run with @no_grad
# Also, `checkTrace` already does all the checks
# against python function
ge = torch.jit.trace(func, input_tensors, check_tolerance=check_tolerance,
_force_outplace=_force_outplace, check_trace=False)
if export_import:
ge = self.getExportImportCopy(ge)
if verbose:
print(ge.graph)
# test no gradients case
outputs = func(*nograd_inputs)
outputs_ge = ge(*nograd_inputs)
self.assertEqual(outputs, outputs_ge)
# test gradients case
outputs = func(*recording_inputs)
if inputs_require_grads:
grads = torch.autograd.grad(allSum(outputs), flattened_recording_inputs,
allow_unused=allow_unused)
outputs_ge = ge(*recording_inputs)
if inputs_require_grads:
grads_ge = torch.autograd.grad(allSum(outputs_ge), flattened_recording_inputs,
allow_unused=allow_unused)
self.assertEqual(outputs, outputs_ge)
if inputs_require_grads:
self.assertEqual(grads, grads_ge, atol=grad_atol, rtol=grad_rtol)
# test the grad grad case
outputs = func(*recording_inputs)
l1 = allSum(outputs)
if inputs_require_grads:
grads = torch.autograd.grad(l1, flattened_recording_inputs, create_graph=True,
allow_unused=allow_unused)
if inputs_require_grads:
l2 = (allSum(grads) * l1)
grads2 = torch.autograd.grad(l2, flattened_recording_inputs, allow_unused=allow_unused)
if inputs_require_grads:
recording_inputs = do_input_map(lambda t: Variable(t, requires_grad=True), reference_tensors)
flattened_recording_inputs = flatten_inputs(recording_inputs)
outputs_ge = ge(*recording_inputs)
l1_ge = allSum(outputs_ge)
if inputs_require_grads:
grads_ge = torch.autograd.grad(
l1_ge, flattened_recording_inputs, create_graph=True, allow_unused=allow_unused)
if inputs_require_grads:
l2_ge = (allSum(grads_ge) * l1_ge)
grads2_ge = torch.autograd.grad(l2_ge, flattened_recording_inputs, allow_unused=allow_unused)
self.assertEqual(outputs, outputs_ge)
if inputs_require_grads:
self.assertEqual(grads, grads_ge, atol=grad_atol, rtol=grad_rtol)
for g2, g2_ge in zip(grads2, grads2_ge):
if g2 is None and g2_ge is None:
continue
self.assertEqual(g2, g2_ge, atol=8e-4, rtol=8e-4)
return ge
def checkModule(self, nn_module, args):
"""
        Check that an nn.Module's results in script mode match eager mode and
        that it can be exported.
"""
sm = torch.jit.script(nn_module)
with freeze_rng_state():
eager_out = nn_module(*args)
with freeze_rng_state():
script_out = sm(*args)
self.assertEqual(eager_out, script_out)
self.assertExportImportModule(sm, args)
return sm
class NoTracerWarnContextManager:
def __enter__(self):
self.prev = torch._C._jit_get_tracer_state_warn()
torch._C._jit_set_tracer_state_warn(False)
def __exit__(self, *args):
torch._C._jit_set_tracer_state_warn(self.prev)
@contextmanager
def inline_everything_mode(should_inline):
old = torch._C._jit_get_inline_everything_mode()
torch._C._jit_set_inline_everything_mode(should_inline)
try:
yield
finally:
torch._C._jit_set_inline_everything_mode(old)
@contextmanager
def set_fusion_group_inlining(inlining):
old = torch._C._debug_get_fusion_group_inlining()
torch._C._debug_set_fusion_group_inlining(inlining)
try:
yield
finally:
torch._C._debug_set_fusion_group_inlining(old)
# note: not re-entrant, use unnested only
@contextmanager
def disable_autodiff_subgraph_inlining(enabled=True):
torch._C._debug_set_autodiff_subgraph_inlining(not enabled)
try:
yield
finally:
torch._C._debug_set_autodiff_subgraph_inlining(True)
def _inline_everything(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
with inline_everything_mode(True):
fn(*args, **kwargs)
return wrapper
# this exists for forward compatibility reasons temporarily.
# TODO(suo) remove
def _tmp_donotuse_dont_inline_everything(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
with inline_everything_mode(False):
fn(*args, **kwargs)
return wrapper
# make it easy to quickly define/trace a function for these tests
def _trace(*args, **kwargs):
def wrapper(func):
return torch.jit.trace(func, args, **kwargs)
return wrapper
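# Illustrative usage sketch (not part of the original file): trace a one-off function
# by supplying the example input directly through the decorator.
def _example_trace_decorator():
    @_trace(torch.rand(3))
    def doubled(x):
        return x * 2
    return doubled(torch.rand(3))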
def enable_cpu_fuser(fn):
def wrapper(*args, **kwargs):
torch._C._jit_override_can_fuse_on_cpu_legacy(True)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_set_te_must_use_llvm_cpu(False)
try:
fn(*args, **kwargs)
finally:
torch._C._jit_override_can_fuse_on_cpu_legacy(False)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_set_te_must_use_llvm_cpu(True)
return wrapper
def enable_cpu_fuser_if(cond):
if cond:
return enable_cpu_fuser
else:
def noop_fuser(fn):
def wrapper(*args, **kwargs):
return fn(*args, **kwargs)
return wrapper
return noop_fuser
def get_forward(c):
return c._get_method('forward')
def get_forward_graph(c):
return c._get_method('forward').graph
def get_module_method(m, module, method):
return m._c.getattr(module)._get_method(method)
def attrs_with_prefix(module, prefix):
return [x for x, _ in module._modules._c.items()
if x.startswith(prefix)]
def warmup_backward(f, *args):
profiling_count = 3
results = []
for _ in range(profiling_count):
if len(args) > 0:
r = torch.autograd.grad(f, *args)
results.append(r)
else:
f.backward(retain_graph=True)
return results
# TODO: Remove me once https://bugs.python.org/issue42666 is resolved
def make_global(*args):
for arg in args:
setattr(sys.modules[arg.__module__], arg.__name__, arg)
# Helper function to eval Python3 code without causing a syntax error for
# this file under py2
def _get_py3_code(code, fn_name):
with tempfile.TemporaryDirectory() as tmp_dir:
script_path = os.path.join(tmp_dir, 'script.py')
with open(script_path, 'w') as f:
f.write(code)
spec = importlib.util.spec_from_file_location(fn_name, script_path)
module = importlib.util.module_from_spec(spec)
loader = spec.loader
assert isinstance(loader, Loader) # Assert type to meet MyPy requirement
loader.exec_module(module)
fn = getattr(module, fn_name)
return fn
class TensorExprTestOptions:
def __init__(self) -> None:
self.old_profiling_executor = torch._C._jit_set_profiling_executor(True)
self.old_profiling_mode = torch._C._get_graph_executor_optimize(True)
self.old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu()
self.old_gpu_fuser_state = torch._C._jit_can_fuse_on_gpu()
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
torch._C._jit_set_texpr_fuser_enabled(True)
self.old_fusion_inlining = torch._C._debug_get_fusion_group_inlining()
torch._C._debug_set_fusion_group_inlining(False)
self.old_te_must_use_llvm_cpu = torch._C._jit_get_te_must_use_llvm_cpu()
torch._C._jit_set_te_must_use_llvm_cpu(False)
def restore(self):
torch._C._jit_set_profiling_executor(self.old_profiling_executor)
torch._C._get_graph_executor_optimize(self.old_profiling_mode)
torch._C._jit_set_texpr_fuser_enabled(self.texpr_fuser_state)
torch._C._jit_override_can_fuse_on_gpu(self.old_gpu_fuser_state)
torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuser_state)
torch._C._debug_set_fusion_group_inlining(self.old_fusion_inlining)
torch._C._jit_set_te_must_use_llvm_cpu(self.old_te_must_use_llvm_cpu)
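# Illustrative usage sketch (not part of the original file): TensorExprTestOptions
# snapshots the global fuser/executor state on construction, so a test should
# restore it once it is done.
def _example_tensor_expr_test_options(fn):
    opts = TensorExprTestOptions()
    try:
        return fn()
    finally:
        opts.restore()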
def clone_inputs(args):
inputs: list[Union[torch.Tensor, list[torch.Tensor]]] = []
for arg in args:
if isinstance(arg, torch.Tensor):
inputs.append(arg.detach().clone())
elif is_iterable_of_tensors(arg):
inputs.append([t.detach().clone() for t in arg])
else:
inputs.append(arg)
return inputs
def get_traced_sample_variant_pairs(device, dtype, op):
# tuples of (variant, sample)
outputs: list[tuple[Any, Any]] = []
samples = op.sample_inputs(device, dtype)
# Acquires variants to test
func = op.get_op()
method = op.get_method()
variants = {
# TODO: inplace tests currently fail, fix and add inplace variant
'function': func, 'method': method,
}
# TODO: find better way to standardize on op registration itself..
has_fake_function = op.name in ["resize_", 'resize_as_']
if has_fake_function:
variants = {'method': getattr(torch.Tensor, op.name)}
# In eager mode, these ops can take (Tensor, bool) args; but in
# JIT they can only take (Tensor, Scalar), and bool is not a
# scalar in the JIT type system. So to test these in JIT, the bool
# is converted to an int for the test.
ops_with_unsupported_bool_args = [
{
"name": "div_floor_rounding",
"arg_idx": [0],
},
{
"name": "div_no_rounding_mode",
"arg_idx": [0],
},
{
"name": "div_trunc_rounding",
"arg_idx": [0],
},
{
"name": "index_fill",
"arg_idx": [2],
},
{
"name": "full_like",
"arg_idx": [0],
},
{
"name": "mul",
"arg_idx": [0],
},
{
"name": "new_full",
"arg_idx": [1],
},
]
# doesn't support tracing
if has_fake_function:
return outputs
for sample in samples:
for variant in variants.values():
if variant is None:
continue
if is_lambda(variant):
continue
matching_ops = filter(lambda x: op.formatted_name == x["name"], ops_with_unsupported_bool_args)
for op_data in matching_ops:
for idx in op_data["arg_idx"]:
args = list(sample.args)
if len(sample.args) > idx and isinstance(sample.args[idx], bool):
args[idx] = int(args[idx])
sample.args = tuple(args)
outputs.append((variant, sample))
return outputs
# types.LambdaType gave false positives
def is_lambda(lamb):
LAMBDA = lambda: 0 # noqa: E731
return isinstance(lamb, type(LAMBDA)) and lamb.__name__ == LAMBDA.__name__
```
|
=================================================================================================================================
SOURCE CODE FILE: logging_tensor.py
LINES: 1
SIZE: 7.13 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\logging_tensor.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import torch
from torch.utils._pytree import tree_map
from typing import Optional
from collections.abc import Iterator
import logging
import contextlib
import itertools
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils.weak import WeakTensorKeyDictionary
import functools
from torch._C._profiler import gather_traceback, symbolize_tracebacks
logger = logging.getLogger("LoggingTensor")
_dtype_abbrs = {
torch.bfloat16: "bf16",
torch.float64: "f64",
torch.float32: "f32",
torch.float16: "f16",
torch.complex32: "c32",
torch.complex64: "c64",
torch.complex128: "c128",
torch.int8: "i8",
torch.int16: "i16",
torch.int32: "i32",
torch.int64: "i64",
torch.bool: "b8",
torch.uint8: "u8",
torch.float8_e4m3fn: "f8e4m3fn",
torch.float8_e5m2: "f8e5m2",
torch.float8_e4m3fnuz: "f8e4m3fnuz",
torch.float8_e5m2fnuz: "f8e5m2fnuz",
}
# How the chain of calls works for LoggingTensor:
# 1. Call torch.sin
# 2. Attempt __torch_function__. In LoggingTensor torch function is disabled so we bypass it entirely
# 3. Enter dispatcher, wind your way through Autograd
# 4. Hit Python dispatch key, call __torch_dispatch__
# This Tensor can work with autograd in two ways:
# - The wrapped Tensor does not require gradients. In that case, the LoggingTensor
# can require gradients if the user asks for it as a constructor kwarg.
# - The wrapped Tensor can require gradients. In that case autograd will be tracked
# for the wrapped Tensor and the LoggingTensor itself cannot require gradients.
# WARNING: We allow these two possibilities for testing purposes. You should NEVER use both in a single
# test or you might get surprising behavior.
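# A hedged illustration of the two configurations above (hypothetical shapes,
# shown only as comments):
#   case 1 - wrapped tensor does not require grad; the wrapper may:
#       LoggingTensor(torch.randn(2), requires_grad=True)
#   case 2 - wrapped tensor requires grad; the wrapper must not:
#       LoggingTensor(torch.randn(2, requires_grad=True))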
# TODO: TensorBase should work
class LoggingTensor(torch.Tensor):
elem: torch.Tensor
__slots__ = ['elem']
context = contextlib.nullcontext
@staticmethod
def __new__(cls, elem, *args, **kwargs):
# The wrapping tensor (LoggingTensor) shouldn't hold any
# memory for the class in question, but it should still
# advertise the same device as before
r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
cls, elem.size(),
strides=elem.stride(), storage_offset=elem.storage_offset(),
# TODO: clone storage aliasing
dtype=elem.dtype, layout=elem.layout,
device=elem.device, requires_grad=kwargs.get("requires_grad", False)
)
# ...the real tensor is held as an element on the tensor.
r.elem = elem.detach() if r.requires_grad else elem
return r
def __repr__(self):
return super().__repr__(tensor_contents=f"{self.elem}")
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
def unwrap(e):
return e.elem if isinstance(e, cls) else e
def wrap(e):
return cls(e) if isinstance(e, torch.Tensor) else e
with cls.context():
rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs)))
logging.getLogger("LoggingTensor").info(f"{func.__module__}.{func.__name__}", args, kwargs, rs) # noqa: G004
return rs
class LoggingTensorMode(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
rs = func(*args, **kwargs)
logging.getLogger("LoggingTensor").info(f"{func.__module__}.{func.__name__}", args, kwargs, rs) # noqa: G004
return rs
class LoggingTensorReentrant(LoggingTensor):
context = torch.overrides.enable_reentrant_dispatch
# https://stackoverflow.com/questions/36408496/python-logging-handler-to-append-to-list
class LoggingTensorHandler(logging.Handler):
def __init__(
self, log_list: list[str], use_shortid_for_all_tensors: bool,
with_type: bool, tracebacks_list: Optional[list]) -> None:
logging.Handler.__init__(self)
self.log_list = log_list
self.use_shortid_for_all_tensors = use_shortid_for_all_tensors
self.tracebacks_list = tracebacks_list
self.memo = WeakTensorKeyDictionary()
self.next_id = 0
self.with_type = with_type
def _shortid(self, t: torch.Tensor) -> int:
if t not in self.memo:
self.memo[t] = self.next_id
self.next_id += 1
return self.memo[t]
def _fmt(self, a: object, with_type: bool = False) -> str:
cond_cls = torch.Tensor if self.use_shortid_for_all_tensors else LoggingTensor
if isinstance(a, cond_cls):
maybe_type = ""
if with_type and self.with_type:
maybe_type = f": {_dtype_abbrs[a.dtype]}[{', '.join(map(str, a.shape))}]"
x = f"${self._shortid(a)}{maybe_type}"
return x
else:
return repr(a)
def emit(self, record):
fmt_args = ", ".join(
itertools.chain(
(str(tree_map(self._fmt, a)) for a in record.args[0]),
(f"{k}={str(tree_map(self._fmt, v))}" for k, v in record.args[1].items()),
)
)
fmt_rets = tree_map(functools.partial(self._fmt, with_type=True), record.args[2])
self.log_list.append(f'{fmt_rets} = {record.msg}({fmt_args})')
if self.tracebacks_list is not None:
self.tracebacks_list.append(record.traceback)
def log_input(name: str, var: object) -> None:
logger.info("input", (name,), {}, var) # noqa: PLE1205
class GatherTraceback(logging.Filter):
def __init__(self, python=True, script=True, cpp=False):
self.python = python
self.script = script
self.cpp = cpp
def filter(self, record):
record.traceback = gather_traceback(python=self.python, script=self.script, cpp=self.cpp)
return True
@contextlib.contextmanager
def capture_logs(is_mode=False, python_tb=False, script_tb=False, cpp_tb=False) -> Iterator[list[str]]:
collect_traceback = python_tb or script_tb or cpp_tb
log_list: list[str] = []
tracebacks_list: list[str] = []
handler = LoggingTensorHandler(
log_list,
with_type=True,
use_shortid_for_all_tensors=is_mode,
tracebacks_list=tracebacks_list if collect_traceback else None
)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.propagate = False
if collect_traceback:
logger.addFilter(GatherTraceback(python=python_tb, script=script_tb, cpp=cpp_tb))
try:
if collect_traceback:
yield log_list, tracebacks_list
else:
yield log_list
finally:
symbolized_tracebacks = symbolize_tracebacks(tracebacks_list)
tracebacks_list.clear()
tracebacks_list.extend(symbolized_tracebacks)
logger.removeHandler(handler)
@contextlib.contextmanager
def capture_logs_with_logging_tensor_mode(python_tb=False, script_tb=False, cpp_tb=False):
with LoggingTensorMode(), capture_logs(True, python_tb, script_tb, cpp_tb) as logs:
yield logs
```
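A minimal usage sketch for the helpers above, assuming the module imports from the path in the header (`torch.testing._internal.logging_tensor`); the exact log strings vary by PyTorch version and are shown only loosely:

```py
import torch
from torch.testing._internal.logging_tensor import (
    LoggingTensor,
    capture_logs,
    capture_logs_with_logging_tensor_mode,
)

# Subclass route: wrap a tensor and record every __torch_dispatch__ call on it.
x = LoggingTensor(torch.randn(2, 2))
with capture_logs() as logs:
    torch.sin(x)
# logs is a list of strings formatted by LoggingTensorHandler above, e.g. roughly
# "$1: f32[2, 2] = torch._ops.aten.sin.default($0)".

# Mode route: plain tensors are logged too, no wrapping required.
with capture_logs_with_logging_tensor_mode() as mode_logs:
    torch.add(torch.ones(2), torch.ones(2))
print("\n".join(mode_logs))
```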
|
================================================================================================================================
SOURCE CODE FILE: logging_utils.py
LINES: 1
SIZE: 8.25 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\logging_utils.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import torch._dynamo.test_case
import unittest.mock
import os
import contextlib
import torch._logging
import torch._logging._internal
from contextlib import AbstractContextManager
from typing import Callable
from torch._dynamo.utils import LazyString
from torch._inductor import config as inductor_config
import logging
import io
@contextlib.contextmanager
def preserve_log_state():
prev_state = torch._logging._internal._get_log_state()
torch._logging._internal._set_log_state(torch._logging._internal.LogState())
try:
yield
finally:
torch._logging._internal._set_log_state(prev_state)
torch._logging._internal._init_logs()
def log_settings(settings):
exit_stack = contextlib.ExitStack()
settings_patch = unittest.mock.patch.dict(os.environ, {"TORCH_LOGS": settings})
exit_stack.enter_context(preserve_log_state())
exit_stack.enter_context(settings_patch)
torch._logging._internal._init_logs()
return exit_stack
def log_api(**kwargs):
exit_stack = contextlib.ExitStack()
exit_stack.enter_context(preserve_log_state())
torch._logging.set_logs(**kwargs)
return exit_stack
def kwargs_to_settings(**kwargs):
INT_TO_VERBOSITY = {10: "+", 20: "", 40: "-"}
settings = []
def append_setting(name, level):
if isinstance(name, str) and isinstance(level, int) and level in INT_TO_VERBOSITY:
settings.append(INT_TO_VERBOSITY[level] + name)
return
else:
raise ValueError("Invalid value for setting")
for name, val in kwargs.items():
if isinstance(val, bool):
settings.append(name)
elif isinstance(val, int):
append_setting(name, val)
elif isinstance(val, dict) and name == "modules":
for module_qname, level in val.items():
append_setting(module_qname, level)
else:
raise ValueError("Invalid value for setting")
return ",".join(settings)
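# Hedged worked example (comment only, not executed): with the verbosity map
# above, kwargs_to_settings(dynamo=10, graph_breaks=True) is expected to return
# the TORCH_LOGS-style string "+dynamo,graph_breaks", since 10 (DEBUG) maps to
# "+" and bool-valued artifact kwargs are appended by name.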
# Note on testing strategy:
# This class does three things:
# 1. Runs two versions of a test:
# 1a. patches the env var log settings to some specific value
# 1b. calls torch._logging.set_logs(..)
# 2. patches the emit method of each setup handler to gather records
# that are emitted to each console stream
# 3. passes a ref to the gathered records to each test case for checking
#
# The goal of this testing in general is to ensure that given some settings env var
# that the logs are set up correctly and capture the correct records.
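# A hedged, hypothetical usage sketch (comment only); tests built on the helper
# below typically look like:
#
#   class MyLoggingTests(LoggingTestCase):
#       @make_logging_test(dynamo=logging.DEBUG)
#       def test_something(self, records):
#           torch.compile(lambda x: x + 1)(torch.ones(3))
#           self.assertGreater(len(records), 0)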
def make_logging_test(**kwargs):
def wrapper(fn):
@inductor_config.patch({"fx_graph_cache": False})
def test_fn(self):
torch._dynamo.reset()
records = []
# run with env var
if len(kwargs) == 0:
with self._handler_watcher(records):
fn(self, records)
else:
with log_settings(kwargs_to_settings(**kwargs)), self._handler_watcher(records):
fn(self, records)
# run with API
torch._dynamo.reset()
records.clear()
with log_api(**kwargs), self._handler_watcher(records):
fn(self, records)
return test_fn
return wrapper
def make_settings_test(settings):
def wrapper(fn):
def test_fn(self):
torch._dynamo.reset()
records = []
# run with env var
with log_settings(settings), self._handler_watcher(records):
fn(self, records)
return test_fn
return wrapper
class LoggingTestCase(torch._dynamo.test_case.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._exit_stack.enter_context(
unittest.mock.patch.dict(os.environ, {"___LOG_TESTING": ""})
)
cls._exit_stack.enter_context(
unittest.mock.patch("torch._dynamo.config.suppress_errors", True)
)
cls._exit_stack.enter_context(
unittest.mock.patch("torch._dynamo.config.verbose", False)
)
@classmethod
def tearDownClass(cls):
cls._exit_stack.close()
torch._logging._internal.log_state.clear()
torch._logging._init_logs()
def hasRecord(self, records, m):
return any(m in r.getMessage() for r in records)
def getRecord(self, records, m):
record = None
for r in records:
# NB: not r.msg because it looks like 3.11 changed how they
# structure log records
if m in r.getMessage():
self.assertIsNone(
record,
msg=LazyString(
lambda: f"multiple matching records: {record} and {r} among {records}"
),
)
record = r
if record is None:
self.fail(f"did not find record with {m} among {records}")
return record
# This patches the emit method of each handler to gather records
# as they are emitted
def _handler_watcher(self, record_list):
exit_stack = contextlib.ExitStack()
def emit_post_hook(record):
nonlocal record_list
record_list.append(record)
# registered logs are the only ones with handlers, so patch those
for log_qname in torch._logging._internal.log_registry.get_log_qnames():
logger = logging.getLogger(log_qname)
num_handlers = len(logger.handlers)
self.assertLessEqual(
num_handlers,
2,
"All pt2 loggers should only have at most two handlers (debug artifacts and messages above debug level).",
)
self.assertGreater(num_handlers, 0, "All pt2 loggers should have more than zero handlers")
for handler in logger.handlers:
old_emit = handler.emit
def new_emit(record):
old_emit(record)
emit_post_hook(record)
exit_stack.enter_context(
unittest.mock.patch.object(handler, "emit", new_emit)
)
return exit_stack
def logs_to_string(module, log_option):
"""Example:
logs_to_string("torch._inductor.compile_fx", "post_grad_graphs")
returns the output of TORCH_LOGS="post_grad_graphs" from the
torch._inductor.compile_fx module.
"""
log_stream = io.StringIO()
handler = logging.StreamHandler(stream=log_stream)
@contextlib.contextmanager
def tmp_redirect_logs():
try:
logger = torch._logging.getArtifactLogger(module, log_option)
logger.addHandler(handler)
yield
finally:
logger.removeHandler(handler)
def ctx_manager():
exit_stack = log_settings(log_option)
exit_stack.enter_context(tmp_redirect_logs())
return exit_stack
return log_stream, ctx_manager
def multiple_logs_to_string(module: str, *log_options: str) -> tuple[list[io.StringIO], Callable[[], AbstractContextManager[None]]]:
"""Example:
multiple_logs_to_string("torch._inductor.compile_fx", "pre_grad_graphs", "post_grad_graphs")
    returns the output of TORCH_LOGS="pre_grad_graphs, post_grad_graphs" from the
torch._inductor.compile_fx module.
"""
log_streams = [io.StringIO() for _ in range(len(log_options))]
handlers = [logging.StreamHandler(stream=log_stream) for log_stream in log_streams]
@contextlib.contextmanager
def tmp_redirect_logs():
loggers = [torch._logging.getArtifactLogger(module, option) for option in log_options]
try:
for logger, handler in zip(loggers, handlers):
logger.addHandler(handler)
yield
finally:
for logger, handler in zip(loggers, handlers):
logger.removeHandler(handler)
def ctx_manager() -> AbstractContextManager[None]:
exit_stack = log_settings(", ".join(log_options))
exit_stack.enter_context(tmp_redirect_logs())
return exit_stack # type: ignore[return-value]
return log_streams, ctx_manager
```
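A sketch of how `logs_to_string` above is typically consumed (hedged: it assumes an Inductor-capable build; the module and artifact names mirror the docstring example):

```py
import torch
from torch.testing._internal.logging_utils import logs_to_string

log_stream, ctx = logs_to_string("torch._inductor.compile_fx", "post_grad_graphs")

@torch.compile
def f(x):
    return x.sin() + 1

with ctx():  # patches TORCH_LOGS and attaches the StringIO handler
    f(torch.randn(8))

print(log_stream.getvalue())  # post-grad graphs logged during compilation
```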
|
==================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.12 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\opinfo\__init__.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import torch.testing._internal.opinfo.core
import torch.testing._internal.opinfo.definitions
```
|
==============================================================================================================================
SOURCE CODE FILE: core.py
LINES: 1
SIZE: 123.93 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\opinfo\core.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import collections
import collections.abc
import contextlib
import logging
import math
import operator
import unittest
from abc import ABC, abstractmethod
from collections.abc import Iterable
from dataclasses import asdict, dataclass, field
from enum import Enum
from functools import partial
from itertools import product
from typing import Any, Callable, Optional, TypeVar, Union
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_device_type import (
skipCPUIfNoFFT,
tol,
toleranceOverride,
)
from torch.testing._internal.common_dtype import (
_dispatch_dtypes,
floating_and_complex_types,
floating_and_complex_types_and,
floating_types,
get_all_dtypes,
)
from torch.testing._internal.common_utils import (
extract_test_fn,
IS_FBCODE,
is_iterable_of_tensors,
noncontiguous_like,
OPINFO_SAMPLE_INPUT_INDEX,
TEST_WITH_ROCM,
torch_to_numpy_dtype_dict,
TrackedInputIter,
USE_PYTEST,
)
from torch.testing._internal.opinfo import utils
from torchgen.utils import dataclass_repr
# setup logging
log = logging.getLogger(__name__)
# Reasonable testing sizes for dimensions
L = 20
M = 10
S = 5
XS = 3
# Unique value to distinguish default from anything else
_NOTHING = object()
# Extension of getattr to support qualified names
# e.g. _getattr_qual(torch, 'linalg.norm') -> torch.linalg.norm
def _getattr_qual(obj, name, default=_NOTHING):
try:
for path in name.split("."):
obj = getattr(obj, path)
return obj
except AttributeError:
if default is not _NOTHING:
return default
else:
raise
class DecorateInfo:
"""Describes which test, or type of tests, should be wrapped in the given
decorators when testing an operator. Any test that matches all provided
arguments will be decorated. The decorators will only be applied if the
active_if argument is True."""
__slots__ = [
"decorators",
"cls_name",
"test_name",
"device_type",
"dtypes",
"active_if",
]
def __init__(
self,
decorators,
cls_name=None,
test_name=None,
*,
device_type=None,
dtypes=None,
active_if=True,
):
self.decorators = (
list(decorators)
if isinstance(decorators, collections.abc.Sequence)
else [decorators]
)
self.cls_name = cls_name
self.test_name = test_name
self.device_type = device_type
self.dtypes = dtypes
self.active_if = active_if
# Validate dtypes
if self.dtypes is not None:
for dtype in self.dtypes:
assert isinstance(dtype, torch.dtype)
def is_active(self, cls_name, test_name, device_type, dtype, param_kwargs):
return (
self.active_if
and (self.cls_name is None or self.cls_name == cls_name)
and (self.test_name is None or self.test_name == test_name)
and (self.device_type is None or self.device_type == device_type)
and (self.dtypes is None or dtype in self.dtypes)
# Support callables over kwargs to determine if the decorator is active.
and (
self.active_if(param_kwargs)
if isinstance(self.active_if, Callable)
else self.active_if
)
)
# FIXME
# Note: historically the 'input' kwarg had to be a Tensor or TensorList, but we are trying
# to support scalar inputs, too. Some tests still depend on 'input' being a Tensor
# or TensorList, however.
class SampleInput:
"""Represents sample inputs to a function."""
__slots__ = [
"input",
"args",
"kwargs",
"output_process_fn_grad",
"broadcasts_input",
"name",
]
def __init__(
self,
input,
*var_args,
args=None,
kwargs=None,
output_process_fn_grad=None,
broadcasts_input=None,
name=None,
**var_kwargs,
):
# input is the first input to the op and is typically either a Tensor or TensorList (Sequence[Tensor]).
# This follows the typical pattern where for Tensor inputs op(t, ...) = t.op(...).
self.input = input
# Allow calling either as SampleInput(input, args=args, kwargs=kwargs), or as
# SampleInput(input, *args, **kwargs) but not to mix the two forms
if args is not None or kwargs is not None:
assert (
not var_args and not var_kwargs
), """
A SampleInput can be constructed "naturally" with *args and **kwargs or by
explicitly setting the "args" and "kwargs" parameters, but the two
methods of construction cannot be mixed!"""
elif len(var_args) or len(var_kwargs):
assert (
output_process_fn_grad is None
and broadcasts_input is None
and name is None
), """
A SampleInput constructed "naturally" with *args and **kwargs
cannot specify additional metadata in keyword arguments"""
self.args = args if args is not None else var_args
assert isinstance(self.args, tuple)
self.kwargs = kwargs if kwargs is not None else var_kwargs
assert isinstance(self.kwargs, dict)
self.output_process_fn_grad = (
output_process_fn_grad
if output_process_fn_grad is not None
else lambda x: x
)
self.name = name if name is not None else ""
# Specifies if `self.input` is broadcasted or not,
# given that the operator supports broadcasting.
# This field is used to verify the behavior for inplace variant.
#
# If a SampleInput is marked with `broadcasts_input=True`,
# it is verified that we get a `RuntimeError` with this sample,
# and inplace variant. Also inplace grad{grad} tests are skipped,
# for such inputs (as they will error out otherwise).
self.broadcasts_input = (
broadcasts_input if broadcasts_input is not None else False
)
def with_metadata(
self, *, output_process_fn_grad=None, broadcasts_input=None, name=None
):
if output_process_fn_grad is not None:
self.output_process_fn_grad = output_process_fn_grad
if broadcasts_input is not None:
self.broadcasts_input = broadcasts_input
if name is not None:
self.name = name
return self
def _repr_helper(self, formatter):
# Helper function to return the details of the SampleInput as `str`
# It consolidates all the fields of SampleInput and allows,
# formatting the fields like `input`, `args`, etc with `formatter`
# callable to customize the representation.
# Look at `summary` method for example.
arguments = [
f"input={formatter(self.input)}",
f"args={formatter(self.args)}",
f"kwargs={formatter(self.kwargs)}",
f"broadcasts_input={self.broadcasts_input}",
f"name={repr(self.name)}",
]
return f'SampleInput({", ".join(a for a in arguments if a is not None)})'
def __repr__(self):
return self._repr_helper(lambda x: x)
def summary(self):
# Returns the SampleInput details in a more
# friendly format.
# It formats `Tensor` and `TensorList`
# in a more condensed representation.
def formatter(arg):
# Format any instance of `Tensor` (standalone, in list, or in dict)
# by Tensor[TensorShape]
# Eg. Tensor with shape (3, 4) is formatted as Tensor[3, 4]
if isinstance(arg, torch.Tensor):
shape = str(tuple(arg.shape))
dtype = str(arg.dtype)
device = str(arg.device)
contiguity_suffix = ""
# NB: sparse CSR tensors annoyingly return is_sparse=False
is_sparse = arg.is_sparse or arg.layout == torch.sparse_csr
if not is_sparse and not arg.is_contiguous():
contiguity_suffix = ", contiguous=False"
return f'Tensor[size={shape}, device="{device}", dtype={dtype}{contiguity_suffix}]'
elif isinstance(arg, dict):
return {k: formatter(v) for k, v in arg.items()}
elif is_iterable_of_tensors(arg):
return "TensorList[" + ", ".join(map(formatter, arg)) + "]"
elif isinstance(arg, (list, tuple)): # Handle list, tuple
return "(" + ",".join(map(formatter, arg)) + ")"
return repr(arg)
return self._repr_helper(formatter)
# Applies the transform f(t) -> t to each tensor and dtype in the SampleInput
def transform(self, f):
def tt(t):
def _tt(t):
with torch.no_grad():
return f(t)
if isinstance(t, torch.Tensor):
return _tt(t)
elif isinstance(t, torch.dtype):
return _tt(t)
elif isinstance(t, list):
return list(map(tt, t))
elif isinstance(t, tuple):
return tuple(map(tt, t))
elif isinstance(t, dict):
return {k: tt(v) for k, v in t.items()}
else:
return t
sample_tt_input, tt_args, tt_kwargs = (
tt(self.input),
tt(self.args),
tt(self.kwargs),
)
# Note the transformed SampleInput assumes metadata like output_process_fn_grad is still valid!
return SampleInput(
sample_tt_input,
args=tt_args,
kwargs=tt_kwargs,
output_process_fn_grad=self.output_process_fn_grad,
broadcasts_input=self.broadcasts_input,
name=self.name + "_transformed",
)
# Returns the NumPy version of the sample input object in the form of a tuple: (input, args, kwargs)
# Converts tensors to ndarrays by calling .detach().cpu().numpy() on them
# Converts dtypes by remapping them using torch_to_numpy_dtype_dict
def numpy(self):
def to_numpy(t):
if isinstance(t, torch.Tensor):
if t.dtype is torch.bfloat16:
return t.detach().cpu().to(torch.float32).numpy()
if t.dtype is torch.chalf:
return t.detach().cpu().to(torch.cfloat).numpy()
return t.detach().cpu().numpy()
elif isinstance(t, torch.dtype):
return torch_to_numpy_dtype_dict[t]
return t
return self.transform(to_numpy)
def noncontiguous(self):
def to_noncontiguous(t):
if isinstance(t, torch.Tensor):
return noncontiguous_like(t)
elif isinstance(t, torch.dtype):
return t
return t
return self.transform(to_noncontiguous)
NumericsFilter = collections.namedtuple("NumericsFilter", ["condition", "safe_val"])
class ErrorInput:
"""
A SampleInput that will cause the operation to throw an error plus information
about the resulting error.
"""
__slots__ = ["sample_input", "error_type", "error_regex"]
def __init__(self, sample_input, *, error_type=RuntimeError, error_regex):
self.sample_input = sample_input
self.error_type = error_type
self.error_regex = error_regex
class AliasInfo:
"""Class holds alias information. For example, torch.abs ->
torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_
"""
def __init__(self, alias_name):
self.name = alias_name
self.op = _getattr_qual(torch, alias_name)
self.method_variant = getattr(torch.Tensor, alias_name, None)
self.inplace_variant = getattr(torch.Tensor, alias_name + "_", None)
def __call__(self, *args, **kwargs):
return self.op(*args, **kwargs)
# Note [OpInfos]
# ~~~~~~~~~~~~~~
#
# The majority of this note was written shortly after the PyTorch 1.9 release.
# If you notice it's out-of-date or think it could be improved then please
# file an issue.
#
# See also: the OpInfo tracker (https://github.com/pytorch/pytorch/issues/54261)
# See also: "Writing Test Templates" in common_device_type.py to learn how to
# parametrize a test template using OpInfos.
# See also: PyTorch's GitHub wiki on running and writing tests
# https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests
# See also: ModuleInfos, OpInfo's sister class, defined in common_modules.py
#
# An OpInfo is a collection of metadata related to a PyTorch operator. This
# metadata is used to generate tests that validate properties of the operator,
# like if it implements the correct gradient formula.
#
# WHY OPINFOS?
# ~~~~~~~~~~~~
#
# OpInfos are principally intended to do three things:
#
# 1) to allow systematic testing over all PyTorch's operators
# 2) to simplify operator testing by autogenerating many tests
# 3) to allow systems (like autograd, torchscript, fx, nnc...) to test
# against every PyTorch operator
#
# All these goals are still a work in progress. Not every operator has an
# OpInfo, and some operator tests that could be automatically generated
# still have to be written manually.
#
# It's helpful to understand that OpInfos are both about test simplification and
# modularity. PyTorch is a complicated framework with many interrelated systems,
# too many for any one person to keep track of. An OpInfo can be thought of as the
# interface between an operator implementer and those other systems. Instead of
# requiring the implementer of torch.foo to understand how to test its forward
# mode AD or NNC support, that testing is typically handled automatically just by
# defining an OpInfo.
#
# It's often surprising to OpInfo writers that just implementing an OpInfo
# typically can't verify an operator is actually implemented correctly:
#
# "If an OpInfo doesn't validate my op works as expected, what's the point
# of it?"
#
# But the point is the above: OpInfos are intended to let you focus on testing
# the operator logic you're familiar with instead of having to write tests for
# how the operator interacts with each of PyTorch's many systems.
#
# And, OK, it turns out that SOMETIMES just writing an OpInfo DOES
# validate your op works as expected, but that's only in special
# cases. See below for details.
#
# WHAT'S AN OPINFO?
# ~~~~~~~~~~~~~~~~~
#
# So what is an OpInfo? It's a Python class that describes an operator's properties,
# like which dtypes it supports on the CPU and whether it has any aliases.
# These properties can be divided into three categories:
#
# 1) Metadata describing the operator, like the operator's name and if it
# "supports" the out kwarg.
# 2) Test directives, like "skips" that tell the test suite to skip some
# tests.
# 3) A "sample inputs" function that generates valid inputs for the operator.
#
# OpInfo attributes are described in more detail below.
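# A hedged, hypothetical sketch of an entry combining the three categories
# above (comment only; "foo" and sample_inputs_foo are illustrative and are not
# defined in this file):
#
#   OpInfo(
#       "foo",                                    # 1) metadata
#       dtypes=floating_types(),
#       supports_forward_ad=True,
#       skips=(                                   # 2) test directives
#           DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_out"),
#       ),
#       sample_inputs_func=sample_inputs_foo,     # 3) sample inputs function
#   )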
#
# THE SAMPLE INPUTS FUNCTION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The "sample inputs" function merits special elaboration. This function is
# crucial to testing with OpInfos. A typical OpInfo test has to treat the operator
# as a black box. There's no structure for the test to understand or exploit.
# Without "sample inputs" it wouldn't even know how to call the OpInfo's
# operator. The sample input function saves the day by providing different
# "SampleInputs" that can be used to call the operator. A sample input
# function should have the following signature:
#
# def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
#
# And should return an iterable of SampleInputs (see the class description
# above). Each SampleInput defines an "input", "args", "kwargs", an
# "output_process_fn_grad" function, the "broadcasts_input" bool and a
# "name".
#
# All the "sample_inputs" functions are invoked within a `torch.no_grad()`
# environment for efficiency and correctness. As such remember to set the
# "requires_grad" flag on the inputs **after** performing any transformations
# on them.
#
# The "input" is the first argument to the operator, or the tensor that
# the method or inplace variants of the operator should be called on, and
# should be on the requested device, of the requested dtype, and its
# requires_grad attribute should be set to the requires_grad argument.
#
# "args" should contain positional arguments, and "kwargs" keyword arguments.
#
# "output_process_fn_grad" has an interesting name. It's a function that maps
# the operator's output (when given the input, args, and kwargs) to the
# portion of the output to gradcheck. For example, consider an operator
# like torch.linalg.slogdet
# (https://pytorch.org/docs/main/generated/torch.linalg.slogdet.html).
# This operator returns a tuple of two tensors, but the first tensor
# cannot be backwarded through. Its "output_process_fn_grad" filters
# this output tuple to just the second argument, which we can call backward
# on. Functions that produce a single tensor can ignore this argument.
#
# "broadcasts_input" is a bool indicating whether the SampleInput causes the operator
# to broadcast the "input" argument. This is important for tests to understand
# because inplace variants of operations throw a runtime error if they
# would broadcast their input arguments, so tests that work with inplace
# variants filter SampleInputs that broadcast their input.
#
# "name" is a string that's just used for debugging. It appears when printing
# the SampleInput.
#
# Sample inputs are designed to be used with many tests, some
# that are very time consuming, so they should be a small
# set with small tensors. An elaborated set of sample inputs
# can be specified using the "reference_inputs_func" attribute.
# The "reference inputs" for an operation are an extended
# set of sample inputs that can more exhaustively test an
# operator. They are used by only a few tests that are careful
# not to take too long to run. Adding reference inputs
# is highly encouraged!
#
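# A hedged illustration of the pattern described above. This function is an
# editorial example only: "mytensor_op" is hypothetical, the shapes are
# arbitrary, and nothing below refers to it.
def _example_sample_inputs_mytensor_op(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(
        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
    )
    # a small, cheap sample; "input" is the first argument passed to the op
    yield SampleInput(make_arg((S, S)))
    # a broadcasting sample: the (1, S) "input" broadcasts against the (S, S) arg,
    # so the inplace variant is expected to raise and is skipped for this sample
    yield SampleInput(make_arg((1, S)), args=(make_arg((S, S)),), broadcasts_input=True)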
# THE (OPTIONAL) ERROR INPUTS FUNCTION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# OpInfos may optionally specify "error inputs" through an error function. If
# specified test_errors in test_ops.py will call the op with these inputs
# and validate that the desired error is thrown.
#
# Error inputs automate a common testing pattern where multiple inputs are
# passed to an operation and the errors they throw are reviewed. Tests
# written in this style should be ported to the new OpInfo pattern.
#
# Error inputs are specified using the ErrorInput class, which contains
# a SampleInput (see above) and data about the expected error.
#
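# A hedged illustration in the same spirit (editorial example only; the
# signature is assumed to follow the error_inputs_func convention, and the
# error type/regex are hypothetical).
def _example_error_inputs_mytensor_op(op_info, device, **kwargs):
    yield ErrorInput(
        SampleInput(
            make_tensor((S,), device=device, dtype=torch.float32),
            args=("not a tensor",),
        ),
        error_type=TypeError,
        error_regex="must be Tensor",
    )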
# OPINFO FILE ORGANIZATION
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# All OpInfos are currently defined in this file. Most OpInfo tests are defined
# in test_ops.py, but some system-specific tests are defined in those
# systems' test files, and subclass-specific tests are defined in the test
# file that corresponds to that subclass (see below).
# Expect a reorganization in the future.
#
# WHAT'S TESTED?
# ~~~~~~~~~~~~~~
#
# Every OpInfo in the op_db sequence has the following properties validated in
# test_ops.py:
#
# - that its supported dtypes are specified correctly
# - that the operation produces the same results when called with noncontiguous inputs
# - that it supports the out= argument properly (if it allows out=),
# see https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch
# - that it works with the conjugate view bit properly
# - that its function, method, and inplace variants perform the same operation
# (that is, that torch.add, torch.Tensor.add, and torch.Tensor.add_ all
# do the same thing).
# - that its inplace variant preserves the input's storage
# - that its gradient formula is implemented correctly, and that it supports
# gradgrad and complex grad and gradgrad and forward mode AD properly for
# the op's function and inplace variants (method variants are skipped
# to reduce test time).
# - that the operation performs the same operation when traced or scripted
# using the jit
# - that the operation is autodifferentiated by the jit as expected
# - that the operator's aliases, if any, perform the same operation and that
# the jit understands the alias
# - that the operator throws the correct errors (if error_inputs is defined)
# - that the operator produces the same results as a NumPy reference (if ref is defined)
# - that the operator produces the same results as a NumPy reference on an extended
# set of "reference inputs" (if both ref and reference_inputs_func are defined)
# (NOTE: elementwise unary and elementwise binary OpInfos do this even if only
# ref is defined, because they effectively autogenerate reference inputs)
# - that the operator works on different CUDA devices
#
# Additional OpInfo tests are in test_jit_fuser_te.py, test_fx_experimental.py,
# and test_fx.py. These tests validate that operators work with NNC and FX
# as expected.
#
# For performance, some of the above tests may only run on the first
# SampleInput returned by an OpInfo's sample input function.
#
# In addition to these tests, some subclasses (discussed in the next section)
# define additional tests.
#
# Critically, as mentioned above, what's not necessarily tested is that the operator
# works as expected. When implementing an OpInfo an engineer must still
# typically write one or more tests validating the operator's behavior.
# The exception to this is if reference testing is sufficient, or if
# the operation belongs to an OpInfo subclass that has more exhaustive
# operator testing. Elementwise unary and elementwise binary operators,
# in particular, usually don't require additional testing beyond
# writing an OpInfo.
#
#
# OPINFO (SUB)CLASSES
# ~~~~~~~~~~~~~~~~~~~
#
# In addition to the OpInfo base class there are several specialized OpInfo
# subclasses. For example, the UnaryUfuncInfo subclass is used for
# unary elementwise operations. These operations have a common structure
# that test_unary_ufuncs.py exploits with additional automated testing.
# The automated testing in test_unary_ufuncs.py is so thorough, comparing
# the operator to a NumPy reference function on a plethora of values, that
# just implementing an OpInfo for a unary elementwise operation is often
# sufficient testing.
#
# The ForeachFuncInfo is another OpInfo subclass that is hyper-specialized to a
# very unique class of operations. These OpInfos aren't included in the
# op_db sequence and have their own tests.
#
# Other OpInfo subclasses, like SpectralFuncInfo, are just for convenience
# when writing OpInfos.
#
# TESTING A NEW OPERATOR
# ~~~~~~~~~~~~~~~~~~~~~~
#
# If you're adding a new operator to any of the following namespaces:
# - torch
# - torch.fft
# - torch.linalg,
# - torch.special
# - torch.nn.functional
# then you should typically add an OpInfo for it.
#
# As mentioned a couple times above, implementing an OpInfo is not
# usually sufficient testing (unless the operator is a unary or binary elementwise
# operator). The OpInfo will only test the properties described in the
# "WHAT'S TESTED" section. It DOES NOT necessarily verify that the operator is
# implemented correctly.
#
# TIPS FOR WRITING AN OPINFO AND OPINFO TESTS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Writing an OpInfo can be a little daunting. Since the point of an OpInfo is to
# be consumed by a variety of systems it can be hard to understand how to
# deal with test failures or how to set the OpInfo metadata properly.
#
# Before adding an OpInfo it helps to look at other OpInfos. A sample inputs
# function must be defined, and the operator's dtypes must be specified.
# Once that's done you should run the operator's tests in test_ops.py
# (these can be filtered using the "-k" argument in pytest). Tests that
# fail should provide an error message that describes what to change about
# your OpInfo. You don't need to worry about changing an OpInfo's default
# values unless a test yells at you.
#
# Similarly, if you're writing a test that consumes OpInfos then it's critical
# your test provides a clear error message describing what to do when it
# fails. You should not assume the OpInfo implementer is familiar with your
# system.
#
# If you see a confusing error message while developing an OpInfo then please
# file an issue describing what happened.
#
# This trial-and-error approach to writing an OpInfo can be frustrating,
# but it's probably necessary as long as OpInfos don't require
# learning about all the systems that consume them. One thing that can help
# is the get_supported_dtypes() function defined in utils.py. This
# function can be used to programmatically specify the dtypes an operator
# supports, and is especially useful if writing an OpInfo on a machine
# without a CUDA device. See its documentation for more details.
#
# THE FUTURE OF OPINFOS AND OPINFO TESTING
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# In the future we expect OpInfo coverage to improve and cover
# the great majority of PyTorch's (public) operators.
#
# Classes and methods for the operator database
@dataclass
class OpInfo:
"""Operator information and helper functions for acquiring it."""
# the string name of the function
name: str
# An optional reference function that accepts ndarrays (AKA "NumPy arrays").
# If given, the op will be compared with its reference on each of its sample inputs.
ref: Optional[Callable] = None
# the following metadata describes the operator, its variants, and its aliases, if any
# iterable of aliases, e.g. ("absolute",) for torch.abs
aliases: Iterable = None
# additional string to include in the test name
# this is useful when an op needs multiple OpInfos,
# like divide does, often because it's really several
# different ops behind the scenes
variant_test_name: str = ""
# the function variant of the operation, populated as torch.<name> if None
op: Callable = None
# allows the method variant of this operation to be specified as follows:
# - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated method
# - if a Callable, then that callable should be the method associated with this operation
method_variant: Callable = _NOTHING
# allows the inplace variant of this operation to be specified as follows:
# - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated inplace variant
# - if a Callable, then that callable should be the inplace variant associated with this operation
inplace_variant: Callable = _NOTHING
# allows the operator variant of this operation to be specified as follows:
# - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated operator
# - if a Callable, then that callable should be the operator associated with this operation
operator_variant: Callable = _NOTHING
# allows the inplace operator variant of this operation to be specified as follows:
# - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated inplace operator
# - if a Callable, then that callable should be the inplace operator associated with this operation
inplace_operator_variant: Callable = _NOTHING
# the following metadata are test directives for skipping or modifying tests
# information about which tests to skip
skips: tuple = ()
# decorators to apply to generated tests
decorators: tuple = ()
# the following are pointers to functions to generate certain classes of inputs
# function to generate sample inputs with strided layouts
sample_inputs_func: Callable = None
    # function to generate a more thorough set of sample inputs with strided layouts
reference_inputs_func: Callable = None
# function to generate inputs that will throw errors
error_inputs_func: Callable = None
# function to generate sparse (coo, csr, csc, bsr, bsc) inputs that will throw errors
error_inputs_sparse_func: Callable = None
# function to generate sample inputs with sparse coo layouts
sample_inputs_sparse_coo_func: Callable = None
# function to generate sample inputs with sparse csr layouts
sample_inputs_sparse_csr_func: Callable = None
# function to generate sample inputs with sparse csc layouts
sample_inputs_sparse_csc_func: Callable = None
# function to generate sample inputs with sparse bsr layouts
sample_inputs_sparse_bsr_func: Callable = None
# function to generate sample inputs with sparse bsc layouts
sample_inputs_sparse_bsc_func: Callable = None
# the following metadata relates to dtype support and is tested for correctness in test_ops.py
# dtypes this function works with on the CPU,
# inherited by other device types that don't specify their own dtypes
dtypes: _dispatch_dtypes = None
# the following dtypesIf... options override the dtypes value on their respective device types
# I.e. instead of writing multiple `dtypesIfCUDA`, `dtypesIfROCM`, etc one can simply define a dict
# dtypesIf = { 'cuda': (torch.float, torch.double), 'rocm': (torch.half, torch.bfloat16) }
dtypesIf: dict[str, _dispatch_dtypes] = field(default_factory=dict)
def __getattribute__(self, name: str) -> Any:
if name.startswith("dtypesIf") and name != "dtypesIf":
# TODO: Warn if used
dev_name = name.removeprefix("dtypesIf").lower()
return self.dtypesIf.get(dev_name)
return super().__getattribute__(name)
def __setattr__(self, name: str, value: Any) -> None:
# TODO: After migration, start adding warnings here
if name.startswith("dtypesIf") and name != "dtypesIf":
assert isinstance(value, (_dispatch_dtypes, type(None)))
dev_name = name.removeprefix("dtypesIf").lower()
self.dtypesIf[dev_name] = value
return
super().__setattr__(name, value)
# dtypes this function is expected to work with on CUDA
dtypesIfCUDA: _dispatch_dtypes = None
# dtypes this function is expected to work with on ROCM
dtypesIfROCM: _dispatch_dtypes = None
dtypesIfHpu: _dispatch_dtypes = None
# dtypes this function is expected to work with on XPU
dtypesIfXPU: _dispatch_dtypes = None
# backward dtypes this function is expected to work with
backward_dtypes: _dispatch_dtypes = None
# backward dtypes this function is expected to work with on CUDA
backward_dtypesIfCUDA: _dispatch_dtypes = None
# backward dtypes this function is expected to work with on ROCM
backward_dtypesIfROCM: _dispatch_dtypes = None
backward_dtypesIfHpu: _dispatch_dtypes = None
# the following metadata describes the operators out= support
# whether the op supports the out kwarg
# defaults to True, if the op does not allow the out kwarg or
# supports it incorrectly then test_out in test_ops.py should fail
supports_out: bool = True
# the following metadata relates to autograd support
# whether the operation supports backward mode AD
# if true, gradient correctness is tested in test_ops.py
# using the op's sample inputs
supports_autograd: bool = True
# whether the op supports second order gradients
# if true, gradgrad correctness is tested in test_ops.py
    # defaults to supports_autograd's value
# TODO: rename this to supports_bwgrad_bwgrad to be consistent with below
supports_gradgrad: bool = None
# whether the ops supports second order gradients via
# forward-over-reverse. If True, forward-over-reverse gradgrad correctness
# is tested. If False, test that forward grad is not implemented.
# Defaults to False.
supports_fwgrad_bwgrad: bool = False
# whether the operation supports inplace autograd
# if true, tested in test_ops.py
# defaults to supports_autograd's value
supports_inplace_autograd: bool = None
# Whether the operation support forward mode AD
# If the value is True, we check that the gradients are correct
# If the value is False, we test that forward grad is not implemented
supports_forward_ad: bool = False
# Whether the operation has a varargs variant
# (e.g. functions like ones, zeros, methods like view, permute)
supports_varargs: bool = False
# Whether the forward operation avoids materializing COW tensor inputs
supports_cow_input_no_materialize_forward: bool = True
# Whether the backward operation avoids materializing COW tensor inputs
supports_cow_input_no_materialize_backward: bool = True
# Whether to skip the backward part of the COW tensor input test
skip_cow_input_backward: bool = False
# If `supports_cow_input_no_materialize_forward == True`, this list contains
# the arg indices or kwarg names of inputs that are expected to materialize
allow_cow_input_materialize_forward: list[Union[int, str]] = None
# If `supports_cow_input_no_materialize_backward == True`, this list contains
# the arg indices or kwarg names of inputs that are expected to materialize
allow_cow_input_materialize_backward: list[Union[int, str]] = None
# wrapper function for gradcheck
gradcheck_wrapper: Callable = lambda op, *args, **kwargs: op(*args, **kwargs)
# whether to check batched grad when doing gradcheck
    # defaults to the value of supports_autograd or supports_forward_ad
check_batched_grad: bool = None
# whether to check batched grad grad when doing gradgradcheck
    # defaults to supports_gradgrad's value
check_batched_gradgrad: bool = None
# whether to check batched forward grad when doing gradcheck
# defaults to the value of `supports_forward_ad`
check_batched_forward_grad: bool = None
# whether to check batched forward grad when doing gradcheck
# defaults to the value of `check_batched_forward_grad`
check_inplace_batched_forward_grad: bool = None
# tolerance for nondeterminism while performing gradcheck
gradcheck_nondet_tol: float = 0.0
    # Whether to use the fast implementation for gradcheck/gradgradcheck.
# When set to None, defers to the default value provided by the wrapper
# function around gradcheck (testing._internal.common_utils.gradcheck)
gradcheck_fast_mode: bool = None
# the following metadata relates to JIT support and is tested for correctness in test_ops.py
# name of the corresponding aten:: operator
aten_name: str = None
# if this is a composite implicit autograd op, the decomposed op
decomp_aten_name: Optional[str] = None
# name of the corresponding aten:: operator for backwards
aten_backward_name: Optional[str] = None
    # if an op's aten::node is expected to be symbolically autodiffed
assert_autodiffed: bool = False
# a list of strings with node names that are expected to be in a
# DifferentiableGraph when autodiffed. Ex: ['aten::add', 'aten::mm'],
# default is populated to be ['aten::(name of Python operator)']
autodiff_nonfusible_nodes: list[str] = None
# a list of strings with node names that are expected to be in FusionGroups
# inside of DifferentiableGraphs when this operation is autodiffed.
# Ex: ['aten::add', 'aten::mm'], defaults to an empty list
# Note: currently no ops use fusible nodes
autodiff_fusible_nodes: list[str] = None
# the following metadata relates to sparse support and is used in test_sparse.py
# whether the op supports sparse coo inputs, defaults to False
# TODO: rename supports_sparse to supports_sparse_coo
supports_sparse: bool = None
    # whether the op can be scripted; if False, only tracing tests are run
supports_scripting: bool = True
# if the operator can be traced
supports_tracing: bool = True
# the following metadata relates to sparse compressed support and
# is used in test_sparse_csr.py and test_sparse.py
# whether the op supports sparse csr inputs, defaults to False
supports_sparse_csr: bool = None
# whether the op supports sparse csc inputs, defaults to False
supports_sparse_csc: bool = None
# whether the op supports sparse bsr inputs, defaults to False
supports_sparse_bsr: bool = None
# whether the op supports sparse bsc inputs, defaults to False
supports_sparse_bsc: bool = None
# whether the op supports nested jagged inputs, defaults to False
supports_njt: bool = None
# whether the op promotes integer inputs to float
promotes_int_to_float: bool = False
# the following metadata relates to complex support and is checked in test_ops.py
test_conjugated_samples: bool = True
test_neg_view: bool = True
# assert that jit shape analysis fully propagates shape
assert_jit_shape_analysis: bool = False
# the following metadata relates to ExpandedWeights support and is checked in test_expanded_weights.py
supports_expanded_weight: bool = False
is_factory_function: bool = False
skip_correctness_check_compile_vs_eager: bool = False
def __post_init__(self):
self._original_opinfo_args = asdict(self).copy()
assert self.dtypes is not None, f"OpInfo for {self.name} has no dtypes!"
# Validates the dtypes are generated from the dispatch-related functions
for name, val in self.dtypesIf.items():
if val is not None:
assert isinstance(val, _dispatch_dtypes)
self.dtypesIf[name] = set(val)
if self.aten_name is None:
self.aten_name = self.name
# Attribute to verify dynamic_dtypes are used.
self.dynamic_dtypes = any(
isinstance(dtypes, utils._dynamic_dispatch_dtypes)
for dtypes in self.dtypesIf.values()
)
if self.dynamic_dtypes:
            # Make sure `dtypesIfCUDA` is dynamic, if dynamic dispatch is used for CPU
# This is because, below we set dtypesIfCUDA to dtypes if they are None.
            assert isinstance(self.dtypesIfCUDA, utils._dynamic_dispatch_dtypes), (
                f"To use dynamic dtypes for operator {self.name}, "
                "acquire the dtypes dynamically for argument `dtypesIfCUDA`. "
                "This is to ensure that CUDA dtypes are acquired correctly as they "
                "differ from CPU dtypes occasionally."
)
self.dtypes = set(self.dtypes)
# NOTE: backward dtypes must be acquired before forward dtypes
# since they fallback to explicit (not implicit!) specifications of
# forward dtypes
self.backward_dtypesIfROCM = (
set(self.backward_dtypesIfROCM)
if self.backward_dtypesIfROCM is not None
else (
self.backward_dtypesIfCUDA
if self.backward_dtypesIfCUDA is not None
else self.backward_dtypes
if self.backward_dtypes is not None
else self.dtypesIfROCM
if self.dtypesIfROCM is not None
else self.dtypesIfCUDA
if self.dtypesIfCUDA is not None
else self.dtypes
)
)
self.backward_dtypesIfCUDA = (
set(self.backward_dtypesIfCUDA)
if self.backward_dtypesIfCUDA is not None
else (
self.backward_dtypes
if self.backward_dtypes is not None
else self.dtypesIfCUDA
if self.dtypesIfCUDA is not None
else self.dtypes
)
)
self.backward_dtypesIfHpu = (
set(self.backward_dtypesIfHpu)
if self.backward_dtypesIfHpu is not None
else (
self.backward_dtypes
if self.backward_dtypes is not None
else self.dtypes
)
)
self.backward_dtypes = (
set(self.backward_dtypes)
if self.backward_dtypes is not None
else self.dtypes
)
# Inherit from cpu
for dev_type in ["cuda", "hpu"]:
if self.dtypesIf.get(dev_type) is None:
self.dtypesIf[dev_type] = self.dtypes
# Inherit from CUDA
for dev_type in ["rocm", "xpu"]:
if self.dtypesIf.get(dev_type) is None:
self.dtypesIf[dev_type] = self.dtypesIf["cuda"]
# NOTE: if the op is unspecified it is assumed to be under the torch namespace
if not self.op:
self.op = _getattr_qual(torch, self.name)
if self.method_variant is _NOTHING:
self.method_variant = getattr(torch.Tensor, self.name, None)
# attributes like real, imag are not callable
if not callable(self.method_variant):
self.method_variant = None
if self.inplace_variant is _NOTHING:
inplace_name = self.name + "_"
self.inplace_variant = getattr(torch.Tensor, inplace_name, None)
if self.operator_variant is _NOTHING:
self.operator_variant = getattr(operator, self.name, None)
if self.inplace_operator_variant is _NOTHING:
# Note: operator.i<op> will use operator.<op> and assign the result to the lhs when no
# __i<op>__ method is found. This results in the appearance of an inplace operator variant which
# does not have the correct inplace behavior. To avoid this, we guard automatic detection of the inplace
# operator with a check that an inplace variant exists.
if self.inplace_variant is not None:
inplace_operator_name = "i" + self.name
self.inplace_operator_variant = getattr(
operator, inplace_operator_name, None
)
else:
self.inplace_operator_variant = None
self.decorators = (*self.decorators, *self.skips)
# Specifying sample inputs function without specifying the
# corresponding layout support implies the layout support:
if self.supports_sparse is None:
self.supports_sparse = self.sample_inputs_sparse_coo_func is not None
if self.sample_inputs_sparse_coo_func is None:
self.sample_inputs_sparse_coo_func = self._sample_inputs_unspecified
if self.supports_sparse_csr is None:
self.supports_sparse_csr = self.sample_inputs_sparse_csr_func is not None
if self.sample_inputs_sparse_csr_func is None:
self.sample_inputs_sparse_csr_func = self._sample_inputs_unspecified
if self.supports_sparse_csc is None:
self.supports_sparse_csc = self.sample_inputs_sparse_csc_func is not None
if self.sample_inputs_sparse_csc_func is None:
self.sample_inputs_sparse_csc_func = self._sample_inputs_unspecified
if self.supports_sparse_bsr is None:
self.supports_sparse_bsr = self.sample_inputs_sparse_bsr_func is not None
if self.sample_inputs_sparse_bsr_func is None:
self.sample_inputs_sparse_bsr_func = self._sample_inputs_unspecified
if self.supports_sparse_bsc is None:
self.supports_sparse_bsc = self.sample_inputs_sparse_bsc_func is not None
if self.sample_inputs_sparse_bsc_func is None:
self.sample_inputs_sparse_bsc_func = self._sample_inputs_unspecified
if self.supports_njt is None:
self.supports_njt = False
        # We run the sampling functions without tracking the gradients of the creation of inputs
self.sample_inputs_func = torch.no_grad()(self.sample_inputs_func)
self.sample_inputs_sparse_coo_func = torch.no_grad()(
self.sample_inputs_sparse_coo_func
)
self.sample_inputs_sparse_csr_func = torch.no_grad()(
self.sample_inputs_sparse_csr_func
)
self.sample_inputs_sparse_csc_func = torch.no_grad()(
self.sample_inputs_sparse_csc_func
)
self.sample_inputs_sparse_bsr_func = torch.no_grad()(
self.sample_inputs_sparse_bsr_func
)
self.sample_inputs_sparse_bsc_func = torch.no_grad()(
self.sample_inputs_sparse_bsc_func
)
if self.reference_inputs_func is not None:
self.reference_inputs_func = torch.no_grad()(self.reference_inputs_func)
if not self.autodiff_fusible_nodes:
self.autodiff_fusible_nodes = []
if self.autodiff_nonfusible_nodes is None:
self.autodiff_nonfusible_nodes = ["aten::" + self.name]
# Autograd support
# Autograd flags that depend on backward AD only
# - If setting has been explicitly set, raise error if inconsistent
if self.supports_gradgrad is None:
self.supports_gradgrad = self.supports_autograd
else:
            assert not (self.supports_gradgrad and not self.supports_autograd), (
                "supports_gradgrad refines the part of autograd that is supported, so it should "
"not be set if supports_autograd is False"
)
if self.check_batched_grad is None:
self.check_batched_grad = self.supports_autograd or self.supports_forward_ad
else:
assert not (
self.check_batched_grad
and not (self.supports_autograd or self.supports_forward_ad)
            ), (
                "check_batched_grad refines the part of autograd that will be checked (by gradcheck), so "
                "it should not be set if both supports_autograd and supports_forward_ad are False"
)
if self.check_batched_gradgrad is None:
self.check_batched_gradgrad = self.supports_gradgrad
else:
assert not (self.check_batched_gradgrad and not self.supports_gradgrad), (
"check_batched_gradgrad refines the part of autograd that will be checked (by "
"gradgradcheck), so it should not be set if either supports_gradgrad or supports_autograd "
"is False."
)
if self.check_batched_forward_grad is None:
self.check_batched_forward_grad = self.supports_forward_ad
else:
assert not (
self.check_batched_forward_grad and not self.supports_forward_ad
), (
"check_batched_forward_grad should only be used when supports_forward_ad "
"is True. It is used to disable the test in the specific cases "
"where the op supports forward ad but fails to compute "
"batched forward grad."
)
if self.check_inplace_batched_forward_grad is None:
self.check_inplace_batched_forward_grad = self.check_batched_forward_grad
else:
assert not (
self.check_inplace_batched_forward_grad
and not self.check_batched_forward_grad
            ), (
                "check_inplace_batched_forward_grad should only be used when check_batched_forward_grad "
"is True. It is used to disable the test in the specific cases "
"where the op supports batched forward grad but fails to compute batched forward "
"grad for the inplace variant of the op."
)
        assert not (self.supports_fwgrad_bwgrad and not self.supports_autograd), (
            "supports_fwgrad_bwgrad enables forward-over-backward gradgrad checks and should only be "
            "True if backward ad is also checked, i.e., supports_autograd should be True.",
self.name,
)
# Autograd flags that depend on both forward AD and backward AD
if self.supports_inplace_autograd is None:
self.supports_inplace_autograd = (
self.supports_autograd or self.supports_forward_ad
)
else:
assert not (
self.supports_inplace_autograd
and not self.supports_autograd
and not self.supports_forward_ad
), (
"supports_inplace_autograd refines the part of autograd that is supported, so "
"it should not be set if both supports_autograd and supports_forward_ad are False"
)
if self.aliases is not None:
self.aliases = tuple(AliasInfo(a) for a in self.aliases) # type: ignore[assignment]
else:
self.aliases = ()
def __call__(self, *args, **kwargs):
"""Calls the function variant of the operator."""
return self.op(*args, **kwargs)
def __str__(self):
return dataclass_repr(self)
def get_op(self):
"""Returns the function variant of the operator, torch.<op_name>."""
return self.op
def get_method(self):
"""Returns the method variant of the operator, torch.Tensor.<op_name>.
Returns None if the operator has no method variant.
"""
return self.method_variant
def get_inplace(self):
"""Returns the inplace variant of the operator, torch.Tensor.<op_name>_.
Returns None if the operator has no inplace variant.
"""
return self.inplace_variant
def get_operator(self):
"""Returns operator variant of the operator, e.g. operator.neg
Returns None if the operator has no operator variant.
"""
return self.operator_variant
def get_inplace_operator(self):
"""Returns the inplace operator variant of the operator, e.g operator.iadd
Returns None if the operator has no inplace operator variant"""
return self.inplace_operator_variant
# Returns a tuple of callables:
# (TestCase -> subtest context, TestCase -> skip / xfail context)
# I'd love to combine these into one but I haven't figured out how to do it
# in a way that works like it should, and I tried a LOT of things.
def _maybe_skip_or_xfail(self, rules, device, sample, idx):
def _subtest_fn(test_case, sample=sample.name, idx=idx):
return test_case.subTest(sample=sample, idx=idx)
if rules is None or len(rules) == 0:
return (_subtest_fn, lambda _: contextlib.nullcontext())
# NB: match first rule only (order matters!)
for rule in rules:
if rule.sample_match_fn(device, sample):
log.debug(
"matched %s rule '%s': %s %s %s",
rule.type,
rule.name,
self.full_name,
device,
sample,
)
# Provide a context for the test case to run the sample input
# through as a subtest AND handle skip / xfail for it as needed.
return (
_subtest_fn,
lambda test_case, rule=rule: rule.get_context(test_case),
)
log.debug("matched no rules: %s %s %s", self.full_name, device, sample)
return (_subtest_fn, lambda _: contextlib.nullcontext())
def _sample_callback_fn(self, use_subtests, device):
# Get sample-specific skips / xfails.
sample_skips_and_xfails = getattr(
extract_test_fn(), "sample_skips_and_xfails", None
)
if sample_skips_and_xfails is not None and not use_subtests:
raise RuntimeError(
"""Sample-specific skips / xfails require use_subtests=True.
Please pass this to the sample generation function and run the test logic within the
returned contexts (NB: order matters!). For example:
def test_foo(self, device, dtype, op):
for sample, subtest_ctx, skip_xfail_ctx in op.sample_inputs(..., use_subtests=True):
# these contexts handle running within subtests and skips / xfails
with subtest_ctx(self), skip_xfail_ctx(self):
# test logic here
..."""
)
if not use_subtests:
# use the default callback that returns the sample without a subtest context
return None
if USE_PYTEST:
try:
import pytest_subtests # noqa: F401
except ModuleNotFoundError:
raise RuntimeError(
"Encountered an OpInfo test with use_subtests=True and pytest-subtests is "
"not installed. The feature will not work correctly within pytest without "
"this package; please install it."
) from None
def _f(
sample,
idx,
self=self,
device=device,
sample_skips_and_xfails=sample_skips_and_xfails,
use_subtests=use_subtests,
):
# When subtests are enabled, also return a subtest context. This is required
# for xfails / skips to work properly.
return (
sample,
*self._maybe_skip_or_xfail(
sample_skips_and_xfails, device, sample, idx
),
)
return _f
def conjugate_sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs but with the tensor input or first
tensor in a sequence input conjugated.
"""
set_seed = kwargs.pop("set_seed", True)
use_subtests = kwargs.pop("use_subtests", False)
samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)
conj_samples = list(samples)
def conjugate(tensor):
_requires_grad = tensor.requires_grad
tensor = tensor.conj()
return tensor.requires_grad_(_requires_grad)
for i, sample in enumerate(samples):
sample = conj_samples[i]
# Note: it is assumed that the input here is either a tensor or tensorlist
if isinstance(sample.input, torch.Tensor):
sample.input = conjugate(sample.input)
else:
sample.input[0] = conjugate(sample.input[0])
return TrackedInputIter(
iter(conj_samples),
"conjugate sample input",
item_callback=self._sample_callback_fn(use_subtests, device),
set_seed=set_seed,
restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX,
)
def sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
"""
Returns an iterable of SampleInputs.
These samples should be sufficient to test the function works correctly
with autograd, TorchScript, etc.
"""
set_seed = kwargs.pop("set_seed", True)
use_subtests = kwargs.pop("use_subtests", False)
samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)
if kwargs.get("include_conjugated_inputs", False):
conj_samples = self.conjugate_sample_inputs(
device, dtype, requires_grad, **kwargs
)
samples_list = list(samples)
samples_list.extend(conj_samples)
samples = tuple(samples_list)
return TrackedInputIter(
iter(samples),
"sample input",
item_callback=self._sample_callback_fn(use_subtests, device),
set_seed=set_seed,
restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX,
)
def reference_inputs(self, device, dtype, requires_grad=False, **kwargs):
"""
Returns an iterable of SampleInputs.
Distinct from sample_inputs() above because this returns an expanded set
of inputs when reference_inputs_func is defined. If undefined this returns
the sample inputs.
"""
set_seed = kwargs.pop("set_seed", True)
use_subtests = kwargs.pop("use_subtests", False)
if self.reference_inputs_func is None:
samples = self.sample_inputs_func(
self, device, dtype, requires_grad, **kwargs
)
return TrackedInputIter(
iter(samples),
"reference input",
item_callback=self._sample_callback_fn(use_subtests, device),
set_seed=set_seed,
restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX,
)
if kwargs.get("include_conjugated_inputs", False):
raise NotImplementedError
references = self.reference_inputs_func(
self, device, dtype, requires_grad, **kwargs
)
return TrackedInputIter(
iter(references),
"reference input",
item_callback=self._sample_callback_fn(use_subtests, device),
set_seed=set_seed,
restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX,
)
def error_inputs(self, device, **kwargs):
"""
Returns an iterable of ErrorInputs.
"""
set_seed = kwargs.pop("set_seed", True)
use_subtests = kwargs.pop("use_subtests", False)
errs = self.error_inputs_func(self, device, **kwargs)
def _error_item_callback(e, i, use_subtests=use_subtests, device=device):
cb = self._sample_callback_fn(use_subtests, device)
# no rules to apply; just return the sample
if cb is None:
return e
# adapt the callback call since ErrorInputs contain SampleInputs
            _, *contexts = cb(e.sample_input, i)
            return (e, *contexts)
return TrackedInputIter(
iter(errs),
"error input",
track_callback=lambda e: e.sample_input,
item_callback=_error_item_callback,
set_seed=set_seed,
restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX,
)
def error_inputs_sparse(self, device, layout, **kwargs):
"""
Returns an iterable of ErrorInputs that contain sparse sample
inputs with a specified layout.
"""
if not self.supports_sparse_layout(layout):
raise unittest.SkipTest("unsupported sparse layout")
return self.error_inputs_sparse_func(self, device, layout, **kwargs)
def supports_sparse_layout(self, layout):
"""Return True if OpInfo supports the specified sparse layout."""
layout_name = str(layout).split(".")[-1]
# map torch.sparse_coo to OpInfo.supports_sparse:
layout_name = layout_name.replace("_coo", "")
return getattr(self, f"supports_{layout_name}")
def sample_inputs_sparse(
self, layout, device, dtype, requires_grad=False, **kwargs
):
"""Returns an iterable of SampleInputs that contain inputs with a
specified sparse layout.
"""
layout_name = str(layout).split(".")[-1]
sample_inputs_mth = getattr(self, "sample_inputs_" + layout_name)
def non_empty_sampler(op, generator):
found_sample = False
for sample in generator:
found_sample = True
yield sample
if not found_sample:
raise unittest.SkipTest("NO SAMPLES!")
return non_empty_sampler(
self,
sample_inputs_mth(device, dtype, requires_grad=requires_grad, **kwargs),
)
def _sample_inputs_unspecified(self, *args, **kwargs):
"""Raises an NotImplemented exception in a OpInfo instance creation
that specifies supports_sparse(|_csr|_csc|_bsr|_bsc)=True
without specifying the corresponding sample function as
sample_inputs_sparse_(coo|csr|csc|bsr|bsc)_func.
To avoid this, either define the corresponding sample function,
or re-map unsupported samples to error inputs in an appropiate
opinfo/definitions/sparse.py:_validate_sample_input_sparse_<op>
function.
"""
raise NotImplementedError("no sample function specified")
def sample_inputs_sparse_coo(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs that contain inputs with sparse
coo layout.
"""
return self.sample_inputs_sparse_coo_func(
self, device, dtype, requires_grad, **kwargs
)
def sample_inputs_sparse_csr(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs that contain inputs with sparse
csr layout.
"""
return self.sample_inputs_sparse_csr_func(
self, device, dtype, requires_grad, **kwargs
)
def sample_inputs_sparse_csc(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs that contain inputs with sparse
csc layout.
"""
return self.sample_inputs_sparse_csc_func(
self, device, dtype, requires_grad, **kwargs
)
def sample_inputs_sparse_bsr(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs that contain inputs with sparse
bsr layout.
"""
return self.sample_inputs_sparse_bsr_func(
self, device, dtype, requires_grad, **kwargs
)
def sample_inputs_sparse_bsc(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs that contain inputs with sparse
bsc layout.
"""
return self.sample_inputs_sparse_bsc_func(
self, device, dtype, requires_grad, **kwargs
)
def get_decorators(self, test_class, test_name, device, dtype, param_kwargs):
"""Returns the decorators targeting the given test."""
result = []
for decorator in self.decorators:
if isinstance(decorator, DecorateInfo):
if decorator.is_active(
test_class, test_name, device, dtype, param_kwargs
):
result.extend(decorator.decorators)
else:
result.append(decorator)
return result
def supported_dtypes(self, device_type):
if device_type == "privateuse1":
device_type = torch._C._get_privateuse1_backend_name()
device_type = torch.device(device_type).type
if device_type == "cuda" and TEST_WITH_ROCM:
device_type = "rocm"
return self.dtypesIf.get(device_type, self.dtypes)
def supported_backward_dtypes(self, device_type):
if not self.supports_autograd:
return set()
if device_type == "privateuse1":
device_type = torch._C._get_privateuse1_backend_name()
device_type = torch.device(device_type).type
backward_dtypes = None
if device_type == "cuda":
backward_dtypes = (
self.backward_dtypesIfROCM
if TEST_WITH_ROCM
else self.backward_dtypesIfCUDA
)
elif device_type == "hpu":
backward_dtypes = self.backward_dtypesIfHpu
else:
backward_dtypes = self.backward_dtypes
allowed_backward_dtypes = floating_and_complex_types_and(
torch.bfloat16, torch.float16, torch.complex32
)
return set(allowed_backward_dtypes).intersection(backward_dtypes)
def supports_dtype(self, dtype, device_type) -> bool:
return dtype in self.supported_dtypes(device_type)
@property
def full_name(self):
"""Returns a full name that helps to uniquely identify this OpInfo."""
variant = "." + self.variant_test_name if self.variant_test_name else ""
# example: "normal.in_place" where "normal" is the name and "in_place" is the variant
return f"{self.name}{variant}"
@property
def formatted_name(self):
"""Returns a formatted full name for this OpInfo that can be used in test names."""
return self.full_name.replace(".", "_")
# Represents a skip / xfail rule matching a particular set of tests. It allows granularity
# at the device, dtype, op, and individual sample levels. This flexibility allows an entire
# bug to be represented by a single rule, even if it corresponds to multiple conceptual
# test cases across multiple ops.
@dataclass
class SampleRule(ABC):
# function to indicate whether the rule applies to this op; return True if so
# NB: str arg of callable is device_type
op_match_fn: Callable[[str, OpInfo], bool] = None
# function to indicate whether the rule applies to this sample; return True if so
sample_match_fn: Callable[[torch.device, SampleInput], bool] = None
# optional name for identifying the rule
name: str = ""
def __post_init__(self):
if self.op_match_fn is None:
raise ValueError("must have op_match_fn set to be useful")
if self.sample_match_fn is None:
# by default, match for all samples
self.sample_match_fn = lambda device, sample: True
# returns a string identifier of the rule type
@abstractmethod
def type(self) -> str:
...
# returns an appropriate context that handles the xfail, skips, etc.
@abstractmethod
def get_context(self, test_case):
...
# useful for specifying xfails
@dataclass
class XFailRule(SampleRule):
# expected error type
error_type: TypeVar = Exception
# expected error message
error_msg: str = ".*"
@property
def type(self) -> str:
return "xfail"
def get_context(self, test_case):
return test_case.assertRaisesRegex(
            # failures within torch.compile are wrapped in a BackendCompilerFailed
(self.error_type, torch._dynamo.exc.BackendCompilerFailed),
self.error_msg,
)
# useful for specifying skips
@dataclass
class SkipRule(SampleRule):
@property
def type(self):
return "skip"
def get_context(self, test_case):
@contextlib.contextmanager
def skipcontext(test_case=test_case):
test_case.skipTest("Skipped!")
yield
return skipcontext()
# Decorator that defines skip / xfail rules for a given test function. If these are
# present, the @ops decorator will apply these for each op and place them onto the
# parametrized test functions for use by e.g. OpInfo.sample_inputs().
class sample_skips_and_xfails:
def __init__(self, rules):
self.rules = rules
def __call__(self, fn):
rules = getattr(fn, "sample_skips_and_xfails", None)
if rules is not None:
raise RuntimeError("Multiple sets of sample_skips_and_xfails defined")
fn.sample_skips_and_xfails = self.rules
return fn
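# A minimal usage sketch (illustrative only, not taken from a real test file). It assumes an
# OpInfo-parametrized test via the @ops decorator, a hypothetical op database `op_db`, and a
# hypothetical test name `test_foo`; rules are matched in order, and the sample generator must
# be driven with use_subtests=True so the returned contexts can apply the skip / xfail:
#
#   @sample_skips_and_xfails([
#       XFailRule(
#           op_match_fn=lambda device_type, op: op.name == "add",
#           sample_match_fn=lambda device, sample: sample.broadcasts_input,
#           error_type=RuntimeError,
#           error_msg=".*",
#           name="example xfail for broadcasting samples",
#       ),
#       SkipRule(
#           op_match_fn=lambda device_type, op: op.name == "mul",
#           name="example skip for all mul samples",
#       ),
#   ])
#   @ops(op_db)
#   def test_foo(self, device, dtype, op):
#       for sample, subtest_ctx, skip_xfail_ctx in op.sample_inputs(
#           device, dtype, use_subtests=True
#       ):
#           with subtest_ctx(self), skip_xfail_ctx(self):
#               ...  # test logic here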
def _generate_reduction_inputs(device, dtype, requires_grad, **kwargs):
"""Generates input tensors for testing reduction operators"""
yield make_tensor([], dtype=dtype, device=device, requires_grad=requires_grad)
yield make_tensor([2], dtype=dtype, device=device, requires_grad=requires_grad)
yield make_tensor([3, 5], dtype=dtype, device=device, requires_grad=requires_grad)
yield make_tensor(
[3, 2, 1, 2], dtype=dtype, device=device, requires_grad=requires_grad
)
def _generate_reduction_kwargs(ndim, supports_multiple_dims=True):
"""Generates a subset of all valid dim and keepdim kwargs given ndim that
is appropriate for testing reduction operators.
"""
# Test default dim and keepdim
yield {}
# Test reducing inner and outer most dimensions
yield {"dim": 0, "keepdim": True}
yield {"dim": -1, "keepdim": False}
# Test reducing middle dimension
if ndim > 2:
yield {"dim": ndim // 2, "keepdim": True}
if supports_multiple_dims:
# Test reducing all dimensions
yield {"dim": tuple(range(ndim)), "keepdim": False}
# Test reducing both first and last dimensions
if ndim > 1:
yield {"dim": (0, -1), "keepdim": True}
# Test reducing every other dimension starting with the second
if ndim > 3:
yield {"dim": tuple(range(1, ndim, 2)), "keepdim": False}
def sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for reduction operators."""
# TODO(@heitorschueroff) Once all reduction operators are using
# ReductionOpInfo use op_info.supports_multiple_dims directly.
supports_multiple_dims: bool = kwargs.get("supports_multiple_dims", True)
# TODO(@heitorschueroff) Once all reduction operators are using ReductionOpInfo
# use op_info.generate_args_kwargs directly.
generate_args_kwargs = kwargs.get(
"generate_args_kwargs", lambda *args, **kwargs: (yield (), {})
)
for t in _generate_reduction_inputs(device, dtype, requires_grad):
for reduction_kwargs in _generate_reduction_kwargs(
t.ndim, supports_multiple_dims
):
for args, kwargs in generate_args_kwargs(t, **reduction_kwargs):
kwargs.update(reduction_kwargs)
yield SampleInput(
t.detach().requires_grad_(requires_grad), args=args, kwargs=kwargs
)
# NOTE [Reductions]:
#
# For testing purposes, we relax the definition of a reduction operator
# as defined in the docstring below. We do this to capture operators with
# a similar API so they can be tested automatically. However...
#
# Strictly speaking a reduction operator is an operator that can reduce an
# array to a single scalar value and that can be computed from the partial
# result of reducing subarrays. This usually means that the reduction operation
# should be commutative and associative. This definition is important when it
# comes to implementation as it determines how a reduction can be parallelized.
#
# For example, many summary statistics such as median, mode and quantile cannot
# be computed from partial results because these are sorting and counting based
# algorithms that need information that would be lost in the reduced value.
class ReductionOpInfo(OpInfo):
"""Reduction operator information.
An operator is a reduction operator if it reduces one or more dimensions of
the input tensor to a single value. Reduction operators must implement the
following signature:
    - `op(input, *args, dim=None, keepdim=False, **kwargs) -> Tensor` (`dim` and `keepdim` are keyword-only)
ReductionOpInfo tests that reduction operators implement a consistent API.
Optional features such as reducing over multiple dimensions are captured in
the optional keyword parameters of the ReductionOpInfo constructor.
If a reduction operator does not yet implement the full required API of
reduction operators, this should be documented by xfailing the failing
tests rather than adding optional parameters to ReductionOpInfo.
NOTE
The API for reduction operators has not yet been finalized and some
requirements may change.
See tests in test/test_reductions.py
"""
def __init__(
self,
name,
*,
# The identity value for the operator if it has one.
identity: Optional[Any] = None,
# The nan policy for the operator if it implements one.
# - propagate: NaN values are propagated to the output
# - omit: NaN values are discarded during the reduction
nan_policy: Optional[str] = None,
# Whether the operator supports reducing multiple dimensions.
supports_multiple_dims: bool = True,
# Whether the operator promotes integral to floating point dtypes.
promotes_int_to_float: bool = False,
# Whether the operator promotes all integral dtypes to int64.
promotes_int_to_int64: bool = False,
# If a specific dtype is given, then the operator always returns that
# dtype irrespective of the input dtype. If None, the operator returns
# the dtype according to the type promotion rules above.
result_dtype: Optional[torch.dtype] = None,
# Casts complex results to real (e.g. linalg.norm or torch.var)
complex_to_real: bool = False,
# ReductionOpInfo tests generate their own input, dim and keepdim
# arguments and call this function to generate tuples of extra args and
# kwargs to use when calling the op. This is required for operators that
# have other required parameters besides the input tensor.
generate_args_kwargs: Callable = lambda t, dim=None, keepdim=False: (
yield (),
{},
),
# Options from the OpInfo base class
**kwargs,
):
self._original_reduction_args = locals().copy()
assert nan_policy in (None, "propagate", "omit")
# These are mutually exclusive options
assert not (result_dtype and promotes_int_to_float)
assert not (result_dtype and promotes_int_to_int64)
assert not (result_dtype and complex_to_real)
assert not (promotes_int_to_float and promotes_int_to_int64)
# Default sample_inputs_func for ReductionOpInfo which augments sample
# inputs from sample_inputs_reduction with the args and kwargs from
# generate_args_kwargs. This is only used if sample_inputs_func is None.
def sample_inputs_func(*args, **kwargs):
kwargs["supports_multiple_dims"] = supports_multiple_dims
kwargs["generate_args_kwargs"] = generate_args_kwargs
yield from sample_inputs_reduction(*args, **kwargs)
# Override OpInfo defaults and call base class __init__
kwargs.setdefault("inplace_variant", None)
kwargs.setdefault("sample_inputs_func", sample_inputs_func)
super().__init__(name, promotes_int_to_float=promotes_int_to_float, **kwargs)
self.identity = identity
self.nan_policy = nan_policy
self.supports_multiple_dims = supports_multiple_dims
self.promotes_int_to_int64 = promotes_int_to_int64
self.complex_to_real = complex_to_real
self.result_dtype = result_dtype
self.generate_args_kwargs = generate_args_kwargs
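# An illustrative sketch (hypothetical, not an entry from the real op database): a sum-like
# reduction could be declared roughly as below, assuming numpy is available as `np` and using
# ordinary OpInfo keyword arguments (e.g. `ref`, `supports_out`) alongside the reduction flags:
#
#   ReductionOpInfo(
#       "sum",
#       identity=0,
#       supports_multiple_dims=True,
#       promotes_int_to_int64=True,
#       supports_out=True,
#       ref=np.sum,
#   )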
# The base reference input generation for elementwise binary operations
def _reference_inputs_elementwise_binary(
op, device, dtype, requires_grad, exclude_zero, **kwargs
):
yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs)
yield from generate_elementwise_binary_tensors(
op,
device=device,
dtype=dtype,
requires_grad=requires_grad,
exclude_zero=exclude_zero,
)
if dtype is not torch.bool:
yield from generate_elementwise_binary_small_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad
)
if dtype not in (torch.bool, torch.uint8, torch.int8):
yield from generate_elementwise_binary_large_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad
)
yield from generate_elementwise_binary_broadcasting_tensors(
op,
device=device,
dtype=dtype,
requires_grad=requires_grad,
exclude_zero=exclude_zero,
)
yield from generate_elementwise_binary_with_scalar_samples(
op, device=device, dtype=dtype, requires_grad=requires_grad
)
yield from generate_elementwise_binary_with_scalar_and_type_promotion_samples(
op, device=device, dtype=dtype, requires_grad=requires_grad
)
if dtype.is_floating_point or dtype.is_complex:
yield from generate_elementwise_binary_extremal_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad
)
# Note that these reference inputs use scalars for the SampleInput.input value,
# while many tests require SampleInput.input to be a tensor or a list of tensors
def reference_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs):
if hasattr(op, "rhs_make_tensor_kwargs"):
exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False)
gen = partial(
_reference_inputs_elementwise_binary,
op,
device,
dtype,
requires_grad,
exclude_zero,
**kwargs,
)
# yields "normal" samples
yield from gen()
# yields noncontiguous samples
for sample in gen():
yield sample.noncontiguous()
yield from generate_elementwise_binary_noncontiguous_tensors(
op,
device=device,
dtype=dtype,
requires_grad=requires_grad,
exclude_zero=exclude_zero,
)
yield from generate_elementwise_binary_arbitrarily_strided_tensors(
op,
device=device,
dtype=dtype,
requires_grad=requires_grad,
exclude_zero=exclude_zero,
)
# A functional that extends an elementwise binary operator's bespoke error inputs
# with generic error inputs for the class of elementwise binary operations
def make_error_inputs_elementwise_binary(error_inputs_func):
def error_inputs_func_wrapper(op, device, **kwargs):
if error_inputs_func is not None:
yield from error_inputs_func(op, device, **kwargs)
if not op.supports_rhs_python_scalar:
si = SampleInput(torch.tensor((1, 2, 3), device=device), args=(2,))
yield ErrorInput(si, error_type=Exception, error_regex="")
if not op.supports_one_python_scalar:
si = SampleInput(2, args=(torch.tensor((1, 2, 3), device=device),))
yield ErrorInput(si, error_type=Exception, error_regex="")
if (
not kwargs.get("skip_two_python_scalars", False)
and not op.supports_two_python_scalars
):
si = SampleInput(2, args=(3,))
yield ErrorInput(si, error_type=Exception, error_regex="")
return error_inputs_func_wrapper
# The following functions and classes are for testing elementwise binary operators.
# Returns a generator of pairs of contiguous tensors on the requested device
# and with the requested dtype.
#
# This function is intended to test the non-vectorized and vectorized code
# paths of elementwise binary functions, as well as their handling of odd tensor
# sizes (like zero-dim tensors and tensors with zero elements).
#
# Each iterable will include tensors with no elements,
# a zero dim (scalar) tensor, a small 1D tensor, a medium 1D tensor, and
# a large 2D tensor.
def generate_elementwise_binary_tensors(
op, *, device, dtype, requires_grad=False, exclude_zero=False
):
shapes = (
# tensors with no elements
(0,),
(1, 0, 3),
# zero dim (scalar) tensor
(),
# small 1D tensor
(20,),
# medium 1D tensor
(812,),
# large 2D tensor
(1029, 917),
)
make_arg = partial(
make_tensor,
device=device,
dtype=dtype,
requires_grad=requires_grad,
exclude_zero=exclude_zero,
)
for shape in shapes:
lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
yield SampleInput(
lhs, args=(rhs,), kwargs=op.sample_kwargs(device, dtype, lhs)[0]
)
def generate_elementwise_binary_arbitrarily_strided_tensors(
op, *, device, dtype, requires_grad=False, exclude_zero=False
):
# shape, strides, offset
strided_cases = (
((5, 6, 2), (1, 1, 7), 2),
((5, 5, 4), (1, 1, 7), 2),
((5, 5, 2), (4, 5, 7), 3),
((5, 5, 2), (5, 5, 7), 3),
((5, 5, 2), (5, 5, 5), 3),
((9, 5, 2), (0, 1, 7), 3),
)
make_arg = partial(
make_tensor,
device=device,
dtype=dtype,
requires_grad=requires_grad,
exclude_zero=exclude_zero,
)
for shape, strides, offset in strided_cases:
a = make_arg(
500,
).as_strided(shape, strides, offset)
b = make_arg(shape)
yield SampleInput(a, args=(b,), kwargs=op.sample_kwargs(device, dtype, a)[0])
# Returns a generator of pairs of contiguous tensors on the requested device and with
# the requested dtype.
#
# Unlike the previous function, the values in these tensors are specified manually.
def generate_elementwise_binary_small_value_tensors(
op, *, device, dtype, requires_grad=False, exclude_zero=None
):
if exclude_zero is None:
if hasattr(op, "rhs_make_tensor_kwargs"):
exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False)
# defines interesting values
_unsigned_int_vals = (0, 1, 55, 127, 128, 190, 210, 220, 254)
_int_vals = (0, -1, 1, -55, 55, -127, 127, -128)
_float_vals = (
0.0,
-0.0,
-0.001,
0.001,
-0.25,
0.25,
-1.0,
1.0,
-math.pi / 2,
math.pi / 2,
-math.pi + 0.00001,
math.pi - 0.00001,
-math.pi,
math.pi,
-math.pi - 0.00001,
math.pi + 0.00001,
)
l_vals = []
r_vals = []
if dtype.is_floating_point:
prod = product(_float_vals, _float_vals)
elif dtype.is_complex:
complex_vals = product(_float_vals, _float_vals)
        # Note: materializing into a list is required here, otherwise the iterator would be
        # exhausted by the following product call and it wouldn't produce the desired cross-product
complex_vals = [complex(*x) for x in complex_vals]
prod = product(complex_vals, complex_vals)
elif dtype in (torch.int8, torch.int16, torch.int32, torch.int64):
prod = product(_int_vals, _int_vals)
elif dtype is torch.uint8:
prod = product(_unsigned_int_vals, _unsigned_int_vals)
else:
raise ValueError("Unsupported dtype!")
for l, r in prod:
l_vals.append(l)
if r == 0 and exclude_zero:
r_vals.append(1)
else:
r_vals.append(r)
lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad)
rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(lhs, args=(rhs,), kwargs=op.sample_kwargs(device, dtype, lhs)[0])
def generate_elementwise_binary_large_value_tensors(
op, *, device, dtype, requires_grad=False
):
_large_int_vals = (-1113, 1113, -10701, 10701)
_large_float16_vals = (-501, 501, -1001.2, 1001.2, -13437.7, 13437.7)
_large_float_vals = _large_float16_vals + (-4988429.2, 4988429.2, -1e20, 1e20)
l_vals = []
r_vals = []
if dtype == torch.float16:
prod = product(_large_float16_vals, _large_float16_vals)
elif dtype.is_floating_point:
prod = product(_large_float_vals, _large_float_vals)
elif dtype.is_complex:
complex_vals = product(_large_float_vals, _large_float_vals)
        # Note: materializing into a list is required here, otherwise the iterator would be
        # exhausted by the following product call and it wouldn't produce the desired cross-product
complex_vals = [complex(*x) for x in complex_vals]
prod = product(complex_vals, complex_vals)
elif dtype in (torch.int16, torch.int32, torch.int64):
prod = product(_large_int_vals, _large_int_vals)
else:
raise ValueError("Unsupported dtype!")
for l, r in prod:
l_vals.append(l)
r_vals.append(r)
lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad)
rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(lhs, args=(rhs,), kwargs=op.sample_kwargs(device, dtype, lhs)[0])
def generate_elementwise_binary_extremal_value_tensors(
op, *, device, dtype, requires_grad=False
):
_float_extremals = (float("inf"), float("-inf"), float("nan"))
l_vals = []
r_vals = []
if dtype.is_floating_point:
prod = product(_float_extremals, _float_extremals)
elif dtype.is_complex:
complex_vals = product(_float_extremals, _float_extremals)
        # Note: materializing into a list is required here, otherwise the iterator would be
        # exhausted by the following product call and it wouldn't produce the desired cross-product
complex_vals = [complex(*x) for x in complex_vals]
prod = product(complex_vals, complex_vals)
else:
raise ValueError("Unsupported dtype!")
for l, r in prod:
l_vals.append(l)
r_vals.append(r)
lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad)
rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(lhs, args=(rhs,), kwargs=op.sample_kwargs(device, dtype, lhs)[0])
# Test case for NaN propagation
nan = (
float("nan") if dtype.is_floating_point else complex(float("nan"), float("nan"))
)
lhs = make_tensor(
(128, 128), device=device, dtype=dtype, requires_grad=requires_grad
)
lhs.view(-1)[::3] = nan
rhs = make_tensor(
(128, 128), device=device, dtype=dtype, requires_grad=requires_grad
)
rhs.view(-1)[::3] = nan
yield SampleInput(lhs, args=(rhs,), kwargs=op.sample_kwargs(device, dtype, lhs)[0])
# Returns a generator of pairs of contiguous and noncontiguous tensors that
# require broadcasting
def generate_elementwise_binary_broadcasting_tensors(
op, *, device, dtype, requires_grad=False, exclude_zero=False
):
shapes = (
((1,), ()),
((2,), ()),
((1,), (2,)),
((2, 1), (2,)),
((1, 2), (2,)),
((3, 2), (2,)),
((1, 3, 2), (2,)),
((1, 3, 2), (3, 2)),
((3, 1, 2), (3, 2)),
((2, 3, 2), ()),
((3, 1, 2), (1, 3, 2)),
)
make_arg = partial(
make_tensor,
device=device,
dtype=dtype,
requires_grad=requires_grad,
exclude_zero=exclude_zero,
)
for shape, noncontiguous in product(shapes, [True, False]):
shape_lhs, shape_rhs = shape
lhs = make_arg(
shape_lhs, noncontiguous=noncontiguous, **op.lhs_make_tensor_kwargs
)
rhs = make_arg(
shape_rhs, noncontiguous=noncontiguous, **op.rhs_make_tensor_kwargs
)
yield SampleInput(
lhs,
args=(rhs,),
broadcasts_input=True,
kwargs=op.sample_kwargs(device, dtype, lhs)[0],
)
# Returns a generator of pairs of contiguous tensors and scalars
def generate_elementwise_binary_with_scalar_samples(
op, *, device, dtype, requires_grad=False
):
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
shapes = ((), (3,), (5, 3), (0, 1, 3), (1, 5))
if op.supports_rhs_python_scalar:
for shape in shapes:
lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
lhs_scalar = make_arg((), **op.lhs_make_tensor_kwargs).item()
rhs_scalar = make_arg((), **op.rhs_make_tensor_kwargs).item()
yield SampleInput(
lhs, args=(rhs_scalar,), kwargs=op.sample_kwargs(device, dtype, lhs)[0]
)
# Extends with scalar lhs
if op.supports_one_python_scalar:
yield SampleInput(
lhs_scalar,
args=(rhs,),
kwargs=op.sample_kwargs(device, dtype, lhs_scalar)[0],
)
if op.supports_two_python_scalars:
lhs_scalar = make_arg((), **op.lhs_make_tensor_kwargs).item()
rhs_scalar = make_arg((), **op.rhs_make_tensor_kwargs).item()
yield SampleInput(
lhs_scalar,
args=(rhs_scalar,),
kwargs=op.sample_kwargs(device, dtype, lhs_scalar)[0],
)
# Returns a generator of samples pairing contiguous tensors with extremal Python scalars and 0d scalar tensors to exercise type promotion
def generate_elementwise_binary_with_scalar_and_type_promotion_samples(
op, *, device, dtype, requires_grad=False
):
    # Add these samples only for logical and comparison ops; arithmetic ops do not handle extremal scalars well
if op.name in (
"eq",
"ne",
"gt",
"ge",
"lt",
"le",
"logical_and",
"logical_or",
"logical_xor",
):
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
shape = (
23,
        )  # this shape is big enough to trigger vectorization, and has a non-vectorized tail
values = (float("nan"), float("inf"), -float("inf"))
scalar_tensors = tuple(torch.tensor(val) for val in values)
if op.supports_rhs_python_scalar:
lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
for scalar in values + scalar_tensors:
yield SampleInput(
lhs, args=(scalar,), kwargs=op.sample_kwargs(device, dtype, lhs)[0]
)
# Extends with scalar lhs
if op.supports_one_python_scalar:
yield SampleInput(
scalar,
args=(rhs,),
kwargs=op.sample_kwargs(device, dtype, scalar)[0],
)
# Returns a generator of pairs of noncontiguous tensors
def generate_elementwise_binary_noncontiguous_tensors(
op, *, device, dtype, requires_grad=False, exclude_zero=False
):
make_arg = partial(
make_tensor,
device=device,
dtype=dtype,
requires_grad=requires_grad,
exclude_zero=exclude_zero,
)
# Generic noncontiguity
lhs = make_arg((1026,), noncontiguous=True, **op.lhs_make_tensor_kwargs)
rhs = make_arg((1026,), noncontiguous=True, **op.rhs_make_tensor_kwargs)
yield SampleInput(
lhs.clone(), args=(rhs.clone(),), kwargs=op.sample_kwargs(device, dtype, lhs)[0]
)
yield SampleInput(
lhs.contiguous(), args=(rhs,), kwargs=op.sample_kwargs(device, dtype, lhs)[0]
)
# Transposed
lhs = make_arg((789, 357), **op.lhs_make_tensor_kwargs)
rhs = make_arg((789, 357), **op.rhs_make_tensor_kwargs)
yield SampleInput(
lhs.T, args=(rhs.T,), kwargs=op.sample_kwargs(device, dtype, lhs)[0]
)
# More noncontiguity
shapes = ((5, 7), (1024,))
for shape in shapes:
lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
lhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0]
lhs_non_contig.copy_(lhs)
rhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0]
rhs_non_contig.copy_(rhs)
yield SampleInput(
lhs_non_contig.clone(),
args=(rhs_non_contig.clone(),),
kwargs=op.sample_kwargs(device, dtype, lhs)[0],
)
yield SampleInput(
lhs_non_contig.contiguous(),
args=(rhs_non_contig,),
kwargs=op.sample_kwargs(device, dtype, lhs)[0],
)
# Noncontiguous indices
shape = (2, 2, 1, 2)
lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
lhs_non_contig = lhs[:, 1, ...]
rhs_non_contig = rhs[:, 1, ...]
yield SampleInput(
lhs_non_contig.clone(),
args=(rhs_non_contig.clone(),),
kwargs=op.sample_kwargs(device, dtype, lhs)[0],
)
yield SampleInput(
lhs_non_contig.contiguous(),
args=(rhs_non_contig,),
kwargs=op.sample_kwargs(device, dtype, lhs)[0],
)
# Expanded tensors
shapes = ((1, 3), (1, 7), (5, 7))
for shape in shapes:
lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
lhs_non_contig = lhs.expand(3, -1, -1)
rhs_non_contig = rhs.expand(3, -1, -1)
yield SampleInput(
lhs_non_contig,
args=(rhs_non_contig,),
kwargs=op.sample_kwargs(device, dtype, lhs)[0],
)
# Sample inputs for elementwise binary operators, like add
def sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs):
_M = S if kwargs.get("small_inputs_only", False) else M
_S = XS if kwargs.get("small_inputs_only", False) else S
    # Default to False so make_arg below is well-defined even if the op does not
    # define rhs_make_tensor_kwargs
    exclude_zero = False
    if hasattr(op, "rhs_make_tensor_kwargs"):
        exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False)
make_arg = partial(
make_tensor,
device=device,
dtype=dtype,
requires_grad=requires_grad,
exclude_zero=exclude_zero,
)
shapes = (
((), ()),
((_S,), ()),
((_S, 1), (_S,)),
((_M, _S), ()),
((_S, _M, _S), (_M, _S)),
((_S, _M, _S), (_S, _M, _S)),
((_M, 1, _S), (_M, _S)),
((_M, 1, _S), (1, _M, _S)),
((0, 1, XS), (0, _M, XS)),
)
for shape_lhs, shape_rhs in shapes:
lhs = make_arg(shape_lhs, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape_rhs, **op.rhs_make_tensor_kwargs)
broadcasts_input = shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs)
yield SampleInput(
lhs,
args=(rhs,),
kwargs=op.sample_kwargs(device, dtype, lhs)[0],
broadcasts_input=broadcasts_input,
)
# Metadata class for binary "universal functions (ufuncs)" that accept two
# tensors and have common properties
class BinaryUfuncInfo(OpInfo):
"""Operator information for 'universal binary functions (binary ufuncs).'
These are functions of two tensors with common properties like:
- they are elementwise functions
- the output shape is determined by the input shape
- they typically have method and inplace variants
- they typically support the out kwarg
- they typically have NumPy or SciPy references
See NumPy's universal function documentation
(https://numpy.org/doc/stable/reference/ufuncs.html) for more details
about the concept of ufuncs.
"""
def __init__(
self,
name,
*,
sample_inputs_func=sample_inputs_elementwise_binary,
reference_inputs_func=reference_inputs_elementwise_binary,
sample_kwargs=lambda device, dtype, input: ({}, {}),
error_inputs_func=None,
lhs_make_tensor_kwargs=None,
rhs_make_tensor_kwargs=None,
always_returns_bool=False, # Set to true if the op always returns bool tensors
supports_rhs_python_scalar=True, # Whether the operator allows Tensor x scalar inputs
supports_one_python_scalar=False, # Whether the operator allows scalar x tensor and tensor x scalar inputs
supports_two_python_scalars=False, # Whether the operator allows scalar x scalar inputs
**kwargs,
):
self._original_binary_ufunc_args = locals().copy()
# Elementwise binary operations perform the equivalent of test_numpy_refs
# in test_binary_ufuncs, but with additional test granularity. So the
# generic test_ops.py test is skipped because it's redundant.
common_skips = (
DecorateInfo(
unittest.skip("Skipping redundant test."),
"TestCommon",
"test_numpy_refs",
),
)
kwargs["skips"] = kwargs.get("skips", ()) + common_skips
super().__init__(
name,
sample_inputs_func=sample_inputs_func,
reference_inputs_func=reference_inputs_func,
error_inputs_func=make_error_inputs_elementwise_binary(error_inputs_func),
**kwargs,
)
self.sample_kwargs = sample_kwargs
# [lr]hs_make_tensor_kwargs are part of the OpInfo to be able to dynamically generate valid samples later on.
if lhs_make_tensor_kwargs is None:
lhs_make_tensor_kwargs = {}
self.lhs_make_tensor_kwargs = lhs_make_tensor_kwargs
if rhs_make_tensor_kwargs is None:
rhs_make_tensor_kwargs = {}
self.rhs_make_tensor_kwargs = rhs_make_tensor_kwargs
self.always_returns_bool = always_returns_bool
self.supports_rhs_python_scalar = supports_rhs_python_scalar
self.supports_one_python_scalar = supports_one_python_scalar
self.supports_two_python_scalars = supports_two_python_scalars
if self.supports_two_python_scalars:
self.supports_one_python_scalar = True
if self.supports_one_python_scalar:
assert (
supports_rhs_python_scalar
), "Can't support lhs and rhs Python scalars but not rhs scalars!"
# The following functions and classes are for testing elementwise unary operators.
def sample_inputs_elementwise_unary(
op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs
):
if not op_kwargs:
op_kwargs = {}
_L = S if kwargs.get("small_inputs_only", False) else L
low, high = op_info.domain
is_floating = dtype.is_floating_point or dtype.is_complex
low = low if low is None or not is_floating else low + op_info._domain_eps
high = high if high is None or not is_floating else high - op_info._domain_eps
if (
op_info.supports_sparse_csr
or op_info.supports_sparse_csc
or op_info.supports_sparse_bsr
or op_info.supports_sparse_bsc
):
# Tensors with dim=2 for sparse compressed testing
yield SampleInput(
make_tensor(
(_L, _L),
device=device,
dtype=dtype,
low=low,
high=high,
requires_grad=requires_grad,
),
kwargs=op_kwargs,
)
else:
# Creates a 1D, empty, and scalar tensor
for shape in ((_L,), (1, 0, 3), ()):
yield SampleInput(
make_tensor(
shape,
device=device,
dtype=dtype,
low=low,
high=high,
requires_grad=requires_grad,
),
kwargs=op_kwargs,
)
# Replace values satisfying condition with a safe value. This is used to block
# out values that could cause a singularity, like tan(pi/2)
def _replace_values_in_tensor(tensor, condition, safe_value):
mask = condition(tensor)
tensor.masked_fill_(mask, safe_value)
# Helper to create a unary elementwise tensor with valid inputs
def _make_unary_elementwise_tensor(shape, *, op, dtype, **kwargs):
low, high = op.domain
is_floating = dtype.is_floating_point or dtype.is_complex
low = low if low is None or not is_floating else low + op._domain_eps
high = high if high is None or not is_floating else high - op._domain_eps
a = make_tensor(shape, low=low, high=high, dtype=dtype, **kwargs)
if op.reference_numerics_filter is not None and dtype is not torch.bool:
condition, safe_value = op.reference_numerics_filter
_replace_values_in_tensor(a, condition, safe_value)
return a
# Restricts the values in the tensor to the domain of the
# given elementwise unary operator
def _filter_unary_elementwise_tensor(a, *, op):
# short-circuits for boolean tensors
if a.dtype is torch.bool:
return a
low, high = op.domain
is_floating = a.dtype.is_floating_point or a.dtype.is_complex
low = low if low is None or not is_floating else low + op._domain_eps
high = high if high is None or not is_floating else high - op._domain_eps
if a.dtype is torch.uint8 and low is not None:
low = max(low, 0)
if not a.dtype.is_floating_point and not a.dtype.is_complex:
low = math.ceil(low) if low is not None else None
high = math.floor(high) if high is not None else None
if op.reference_numerics_filter is not None:
condition, safe_value = op.reference_numerics_filter
_replace_values_in_tensor(a, condition, safe_value)
if low is not None or high is not None:
if a.dtype.is_complex:
a.real.clamp_(low, high)
a.imag.clamp_(low, high)
else:
a.clamp_(min=low, max=high)
return a
def generate_elementwise_unary_tensors(op, *, device, dtype, requires_grad, **kwargs):
# Special-cases bool
if dtype is torch.bool:
tensors = (
torch.empty(0, device=device, dtype=torch.bool),
torch.tensor(True, device=device),
torch.tensor(False, device=device),
torch.tensor((True, False), device=device),
make_tensor((812,), device=device, dtype=dtype),
make_tensor((1029, 917), device=device, dtype=dtype),
)
for a in tensors:
yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0])
shapes = (
(1029, 917),
(812,),
# Empty sizes
(0,),
(0, 3, 3),
(1, 0, 5),
(6, 0, 0, 0),
(3, 0, 1, 0),
)
make_arg = partial(
_make_unary_elementwise_tensor,
op=op,
device=device,
dtype=dtype,
requires_grad=requires_grad,
)
for shape in shapes:
a = make_arg(shape)
yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0])
def generate_elementwise_unary_small_value_tensors(
op, *, device, dtype, requires_grad=False
):
for sample in generate_elementwise_binary_small_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad
):
a = _filter_unary_elementwise_tensor(sample.input, op=op)
yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0])
def generate_elementwise_unary_large_value_tensors(
op, *, device, dtype, requires_grad=False
):
for sample in generate_elementwise_binary_large_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad
):
a = _filter_unary_elementwise_tensor(sample.input, op=op)
yield SampleInput(sample.input, kwargs=op.sample_kwargs(device, dtype, a)[0])
def generate_elementwise_unary_extremal_value_tensors(
op, *, device, dtype, requires_grad=False
):
for sample in generate_elementwise_binary_extremal_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad
):
yield SampleInput(
sample.input, kwargs=op.sample_kwargs(device, dtype, sample.input)[0]
)
def generate_elementwise_unary_noncontiguous_tensors(
op, *, device, dtype, requires_grad=False
):
make_arg = partial(
_make_unary_elementwise_tensor,
op=op,
device=device,
dtype=dtype,
requires_grad=requires_grad,
)
# Generic noncontiguity
t = make_arg((1026,), noncontiguous=True)
yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0])
# Transposed
t = make_arg((1024, 1024)).T
yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0])
# Expanded tensors
shapes = ((1, 3), (1, 7), (5, 7))
for shape in shapes:
t = make_arg(shape)
t_non_contig = t.expand(3, -1, -1)
yield SampleInput(
t_non_contig, kwargs=op.sample_kwargs(device, dtype, t_non_contig)[0]
)
def generate_elementwise_unary_arbitrarily_strided_tensors(
op, *, device, dtype, requires_grad=False
):
# shape, strides, offset
strided_cases = (
((5, 6, 2), (1, 1, 7), 2),
((5, 5, 4), (1, 1, 7), 2),
((5, 5, 2), (4, 5, 7), 3),
((5, 5, 2), (5, 5, 7), 3),
((5, 5, 2), (5, 5, 5), 3),
((9, 5, 2), (0, 1, 7), 3),
)
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
for shape, strides, offset in strided_cases:
a = make_arg(
500,
).as_strided(shape, strides, offset)
yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0])
# Reuses the elementwise binary generators for consistency
# TODO: in the future generalize the reference generators to handle n-ary elementwise operations
def _reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs):
yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs)
yield from generate_elementwise_unary_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
if dtype is not torch.bool:
yield from generate_elementwise_unary_small_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
if dtype not in (torch.bool, torch.uint8, torch.int8) and (
op.handles_large_floats
or (not dtype.is_floating_point and not dtype.is_complex)
):
yield from generate_elementwise_unary_large_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
if dtype.is_floating_point or (
op.handles_complex_extremal_values and dtype.is_complex
):
yield from generate_elementwise_unary_extremal_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
def reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs):
gen = partial(
_reference_inputs_elementwise_unary, op, device, dtype, requires_grad, **kwargs
)
# yields "normal" samples
yield from gen()
# yields noncontiguous samples
for sample in gen():
yield sample.noncontiguous()
yield from generate_elementwise_unary_noncontiguous_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
yield from generate_elementwise_unary_arbitrarily_strided_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
# Metadata class for unary "universal functions (ufuncs)" that accept a single
# tensor and have common properties.
class UnaryUfuncInfo(OpInfo):
"""Operator information for 'universal unary functions (unary ufuncs).'
These are functions of a single tensor with common properties like:
- they are elementwise functions
- the input shape is the output shape
- they typically have method and inplace variants
- they typically support the out kwarg
- they typically have NumPy or SciPy references
See NumPy's universal function documentation
(https://numpy.org/doc/1.18/reference/ufuncs.html) for more details
about the concept of ufuncs.
"""
def __init__(
self,
name, # the string name of the function
*,
dtypes=floating_types(),
domain=(None, None), # the [low, high) domain of the function
handles_complex_extremal_values=True, # whether the op correctly handles extremal values (like nan/inf)
handles_large_floats=True, # whether the op correctly handles large float values (like 1e20)
supports_complex_to_float=False, # op supports casting from complex input to real output safely eg. angle
sample_inputs_func=sample_inputs_elementwise_unary,
reference_inputs_func=reference_inputs_elementwise_unary,
sample_kwargs=lambda device, dtype, input: ({}, {}),
reference_numerics_filter=None, # Filters values in the range of the domain specified above but that should not be tested
**kwargs,
):
self._original_unary_ufunc_args = locals().copy()
super().__init__(
name,
dtypes=dtypes,
sample_inputs_func=sample_inputs_func,
reference_inputs_func=reference_inputs_func,
**kwargs,
)
self.domain = domain
self.handles_complex_extremal_values = handles_complex_extremal_values
self.handles_large_floats = handles_large_floats
self.supports_complex_to_float = supports_complex_to_float
self.reference_numerics_filter = reference_numerics_filter
# test_unary_ufuncs.py generates its own inputs to test the consistency
# of the operator on sliced tensors, non-contig tensors, etc.
# `sample_kwargs` is a utility function to provide kwargs
# along with those inputs if required (eg. clamp).
# It should return two dictionaries, first holding kwarg for
# torch operator and second one for reference NumPy operator.
self.sample_kwargs = sample_kwargs
# Epsilon to ensure grad and gradgrad checks don't test values
# outside a function's domain.
self._domain_eps = 1e-5
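# An illustrative sketch (hypothetical, not an entry from the real op database): a log-like
# unary op restricted to positive inputs could be declared roughly as below, assuming numpy is
# available as `np`; the low end of the domain is nudged by _domain_eps during sample generation:
#
#   UnaryUfuncInfo(
#       "log",
#       ref=np.log,
#       domain=(0, None),
#       handles_complex_extremal_values=False,
#   )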
def sample_inputs_spectral_ops(self, device, dtype, requires_grad=False, **kwargs):
is_fp16_or_chalf = dtype == torch.complex32 or dtype == torch.half
if not is_fp16_or_chalf:
nd_tensor = partial(
make_tensor,
(S, S + 1, S + 2),
device=device,
dtype=dtype,
requires_grad=requires_grad,
)
oned_tensor = partial(
make_tensor, (31,), device=device, dtype=dtype, requires_grad=requires_grad
)
else:
# cuFFT supports powers of 2 for half and complex half precision
# NOTE: For hfft, hfft2, hfftn, irfft, irfft2, irfftn with default args
# where output_size n=2*(input_size - 1), we make sure that logical fft size is a power of two
low = None
high = None
if self.name in ["fft.hfft", "fft.irfft", "_refs.fft.hfft", "_refs.fft.irfft"]:
shapes = ((2, 9, 9), (33,))
elif self.name in [
"fft.hfft2",
"fft.irfft2",
"_refs.fft.hfft2",
"_refs.fft.irfft2",
]:
shapes = ((2, 8, 9), (33,))
elif self.name in [
"fft.hfftn",
"fft.irfftn",
"_refs.fft.hfftn",
"_refs.fft.irfftn",
]:
shapes = ((2, 2, 33), (33,))
# Adjusting the limits because the test would be flaky due to over-saturation of float16
# See: https://github.com/pytorch/pytorch/pull/81416
low = -1.0
high = 1.0
else:
shapes = ((2, 8, 16), (32,))
nd_tensor = partial(
make_tensor,
shapes[0],
device=device,
low=low,
high=high,
dtype=dtype,
requires_grad=requires_grad,
)
oned_tensor = partial(
make_tensor,
shapes[1],
device=device,
low=low,
high=high,
dtype=dtype,
requires_grad=requires_grad,
)
if self.ndimensional == SpectralFuncType.ND:
yield SampleInput(
nd_tensor(),
s=(3, 10) if not is_fp16_or_chalf else (4, 8),
dim=(1, 2),
norm="ortho",
)
yield SampleInput(nd_tensor(), norm="ortho")
yield SampleInput(nd_tensor(), s=(8,))
yield SampleInput(oned_tensor())
yield from (SampleInput(nd_tensor(), dim=dim) for dim in [-1, -2, -3, (0, -1)])
elif self.ndimensional == SpectralFuncType.TwoD:
yield SampleInput(
nd_tensor(),
s=(3, 10) if not is_fp16_or_chalf else (4, 8),
dim=(1, 2),
norm="ortho",
)
yield SampleInput(nd_tensor(), norm="ortho")
yield SampleInput(nd_tensor(), s=(6, 8) if not is_fp16_or_chalf else (4, 8))
yield SampleInput(nd_tensor(), dim=0)
yield SampleInput(nd_tensor(), dim=(0, -1))
yield SampleInput(nd_tensor(), dim=(-3, -2, -1))
else:
yield SampleInput(
nd_tensor(),
n=10 if not is_fp16_or_chalf else 8,
dim=1,
norm="ortho",
)
yield SampleInput(nd_tensor(), norm="ortho")
yield SampleInput(nd_tensor(), n=7 if not is_fp16_or_chalf else 8)
yield SampleInput(oned_tensor())
yield from (SampleInput(nd_tensor(), dim=dim) for dim in [-1, -2, -3])
SpectralFuncType = Enum("SpectralFuncType", ("OneD", "TwoD", "ND"))
# Metadata class for Fast Fourier Transforms in torch.fft.
class SpectralFuncInfo(OpInfo):
"""Operator information for torch.fft transforms."""
def __init__(
self,
name, # the string name of the function
*,
ref=None, # Reference implementation (probably in np.fft namespace)
dtypes=floating_and_complex_types(),
ndimensional: SpectralFuncType,
sample_inputs_func=sample_inputs_spectral_ops,
decorators=None,
**kwargs,
):
self._original_spectral_func_args = dict(locals()).copy()
self._original_spectral_func_args.update(kwargs)
decorators = list(decorators) if decorators is not None else []
decorators += [
skipCPUIfNoFFT,
DecorateInfo(
toleranceOverride({torch.chalf: tol(4e-2, 4e-2)}),
"TestCommon",
"test_complex_half_reference_testing",
),
]
super().__init__(
name=name,
dtypes=dtypes,
decorators=decorators,
sample_inputs_func=sample_inputs_func,
**kwargs,
)
self.ref = ref
self.ndimensional = ndimensional
class ShapeFuncInfo(OpInfo):
"""Early version of a specialized OpInfo for Shape manipulating operations like tile and roll"""
def __init__(
self,
name, # the string name of the function
*,
ref, # a reference function
dtypes=floating_types(),
dtypesIfCUDA=None,
dtypesIfROCM=None,
dtypesIfXPU=None,
sample_inputs_func=None,
**kwargs,
):
super().__init__(
name,
dtypes=dtypes,
dtypesIfCUDA=dtypesIfCUDA,
dtypesIfROCM=dtypesIfROCM,
dtypesIfXPU=dtypesIfXPU,
sample_inputs_func=sample_inputs_func,
**kwargs,
)
self.ref = ref
def sample_inputs_foreach(
self,
device,
dtype,
N,
*,
noncontiguous=False,
same_size=False,
low=None,
high=None,
# zero_size means EVERY input is empty
zero_size: bool,
requires_grad: bool,
    # mutually exclusive with same_size and zero_size, which are all or nothing
intersperse_empty_tensors: bool = False,
):
if zero_size:
return [torch.empty(0, dtype=dtype, device=device) for _ in range(N)]
if same_size:
return [
make_tensor(
(N, N),
dtype=dtype,
device=device,
noncontiguous=noncontiguous,
low=low,
high=high,
requires_grad=requires_grad,
)
for _ in range(N)
]
else:
# interweave some empty tensors + have the last 2 tensors be empty (see #100701)
return [
torch.empty(0, dtype=dtype, device=device, requires_grad=requires_grad)
if (i % 3 == 0 or i >= N - 2) and intersperse_empty_tensors
else make_tensor(
(N - i, N - i),
dtype=dtype,
device=device,
noncontiguous=noncontiguous,
low=low,
high=high,
requires_grad=requires_grad,
)
for i in range(N)
]
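# For example, with the defaults (same_size=False, zero_size=False,
# intersperse_empty_tensors=False) and N=4, the helper above returns four tensors of
# shapes (4, 4), (3, 3), (2, 2), and (1, 1).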
def get_foreach_method_names(name):
# get torch inplace reference function
op_name = "_foreach_" + name
inplace_op_name = op_name + "_"
op = getattr(torch, op_name, None)
inplace_op = getattr(torch, inplace_op_name, None)
ref = getattr(torch, name, None)
ref_inplace = getattr(torch.Tensor, name + "_", None)
return op, inplace_op, ref, ref_inplace
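# For example, get_foreach_method_names("add") resolves to
# (torch._foreach_add, torch._foreach_add_, torch.add, torch.Tensor.add_);
# any variant that does not exist resolves to None.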
@dataclass
class ForeachFuncInfo(OpInfo):
"""Early version of a specialized OpInfo for foreach functions
The main differences from the parent class are (a) `dtypes`, `dtypesIfCUDA`, and `dtypesIfROCM`
are set to `get_all_dtypes(include_qint=False)`, and (b) the following arguments.
    ``supports_alpha_param=True`` means that the function supports an ``alpha`` keyword argument
    taking a python scalar (``numbers.Number``), as `_foreach_add` does.
``supports_scalar_self_arg=True`` means that the function can take a python scalar as its first argument.
Currently only `_foreach_pow` supports this.
    ``backward_requires_result=True`` means that the function uses the forward result
    for its backward computation.
"""
supports_alpha_param: bool = False
supports_scalar_self_arg: bool = False
backward_requires_result: bool = False
def __post_init__(self):
(
foreach_method,
foreach_method_inplace,
torch_ref_method,
torch_ref_inplace,
) = get_foreach_method_names(self.name)
if not self.supports_out:
# note(crcrpar): `foreach_method` for `"zero"` is `None` but `None` would call
# `_getattr_qual` in `OpInfo.__post_init__` which should fail since `_foreach_zero`
# is not defined at the moment. Thus to skip the qualification, set a similar torch
# function.
assert foreach_method is None
assert torch_ref_method is None
foreach_method = foreach_method_inplace
torch_ref_method = torch_ref_inplace
# We disable all complex128 tests internally for foreach due to reported flakiness
# tracked in #139648
supported_dtypes = get_all_dtypes(include_qint=False)
if IS_FBCODE:
supported_dtypes = [
x for x in supported_dtypes if x is not torch.complex128
]
self.dtypes = _dispatch_dtypes(supported_dtypes)
self.op = foreach_method
self.method_variant = foreach_method
self.ref = torch_ref_method
self.inplace_variant = foreach_method_inplace
self.ref_inplace = torch_ref_inplace
self.has_no_in_place = self.inplace_variant is None
name = self.name
self.name = f"_foreach_{name}"
if name == "norm":
self.ref = torch.linalg.vector_norm
elif name == "minimum":
# because minimum ref does not support inplace or scalar
self.ref = torch.clamp_max
self.ref_inplace = torch.Tensor.clamp_max_
elif name == "maximum":
# because maximum ref does not support inplace or scalar
self.ref = torch.clamp_min
self.ref_inplace = torch.Tensor.clamp_min_
# The following sets `dtypesIfCUDA` and `dtypesIfROCM` accordingly.
super().__post_init__()
def sample_zero_size_inputs(self, device, dtype, requires_grad=False, **kwargs):
if not hasattr(self.sample_inputs_func, "sample_zero_size_tensor_inputs"):
return []
return self.sample_inputs_func.sample_zero_size_tensor_inputs(
self, device, dtype, requires_grad, **kwargs
)
def gradcheck_wrapper_hermitian_input(op, input, *args, **kwargs):
"""Gradcheck wrapper for functions that take Hermitian matrices as input.
They require a modified function because the finite-difference algorithm
for calculating derivatives does not preserve the Hermitian property of the input.
"""
return op(input + input.mH, *args, **kwargs)
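# Note: `input + input.mH` is Hermitian by construction, since (A + A.mH).mH == A + A.mH,
# so the wrapped op always receives a Hermitian matrix even after the finite-difference
# perturbation of `input`.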
def gradcheck_wrapper_triangular_input(op, *args, upper=False, idx=0, **kwargs):
"""Gradcheck wrapper for functions that take lower or upper triangular matrices as input.
They require a modified function because the finite-difference algorithm
for calculating derivatives does not preserve the triangular property of the input.
    `idx` is used to specify which `args[idx]` is to be triangularized.
"""
triangular_arg = args[idx].triu() if upper else args[idx].tril()
return op(*args[:idx], triangular_arg, *args[idx + 1 :], upper, **kwargs)
def gradcheck_wrapper_triangular_input_real_positive_diagonal(
op, *args, upper=False, idx=0, **kwargs
):
"""Gradcheck wrapper for functions that take lower/upper triangular matrices
with real and positive diagonals, for example, cholesky-like operations.
"""
arg = args[idx]
arg_diag = arg.diagonal(0, -2, -1)
arg_diag_embed = torch.diag_embed(arg_diag)
id_diag_tensor = torch.ones_like(arg_diag)
id_tensor = torch.diag_embed(id_diag_tensor)
# new_arg = arg - diag(arg) + I
new_arg = arg - arg_diag_embed + id_tensor
return gradcheck_wrapper_triangular_input(
op, *args[:idx], new_arg, *args[idx + 1 :], upper=upper, idx=idx, **kwargs
)
def gradcheck_wrapper_masked_operation(op, input, *args, **kwargs):
"""Gradcheck wrapper for masked operations.
When mask is specified, replaces masked-out elements with zeros.
Use for operations that produce non-finite masked-out elements,
for instance, for minimum and maximum reductions.
"""
output = op(input, *args, **kwargs)
mask = kwargs.get("mask")
if mask is not None:
output_mask = torch.masked._output_mask(op, input, *args, **kwargs)
output = torch.where(output_mask, output, output.new_zeros([]))
return output
def gradcheck_wrapper_masked_pointwise_operation(op, input, *args, **kwargs):
"""Gradcheck wrapper for masked pointwise operations. Assumes that the result
    will be masked iff both tensors are masked at a specific index.
When mask is specified, replaces masked-out elements with zeros.
Use for operations that produce non-finite masked-out elements,
for instance, for minimum and maximum reductions.
"""
output = op(input, *args, **kwargs)
input_mask = kwargs.get("input_mask")
other_mask = kwargs.get("other_mask")
if input_mask is not None and other_mask is not None:
combined_mask = torch.logical_and(input_mask, other_mask)
new_kwargs = dict(mask=combined_mask, **kwargs)
output_mask = torch.masked._input_mask(input, *args, **new_kwargs)
output = torch.where(output_mask, output, output.new_zeros([]))
return output
def clone_sample(sample, **kwargs):
"""
Given a SampleInput, this function analyzes its input, args and kwargs,
and produces a copy with each non-Tensor entry being copied by reference,
    and with each Tensor entry cloned with `t.detach().clone().requires_grad_(t.requires_grad)`.
"""
def clone_tensor(t):
if isinstance(t, torch.Tensor):
return t.detach().clone().requires_grad_(t.requires_grad)
else:
return t
sample_kwargs = kwargs if kwargs else sample.kwargs
return SampleInput(
clone_tensor(sample.input),
args=tuple(map(clone_tensor, sample.args)),
kwargs={k: clone_tensor(v) for k, v in sample_kwargs.items()},
)
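# Illustrative usage sketch (assuming a SampleInput with a single tensor input):
#   s = SampleInput(torch.randn(3, requires_grad=True), args=(2,))
#   s2 = clone_sample(s)
# `s2.input` is a fresh leaf tensor with the same `requires_grad` flag, while non-Tensor
# entries such as the `2` in `args` are shared by reference with the original sample.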
```
|
==============================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.47 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\opinfo\definitions\__init__.py
ENCODING: utf-8
```py
# mypy: ignore-errors
from torch.testing._internal.opinfo.core import OpInfo
from torch.testing._internal.opinfo.definitions import (
_masked,
fft,
linalg,
signal,
special,
)
# Operator database
op_db: list[OpInfo] = [
*fft.op_db,
*linalg.op_db,
*signal.op_db,
*special.op_db,
*_masked.op_db,
]
python_ref_db: list[OpInfo] = [
*fft.python_ref_db,
*linalg.python_ref_db,
*special.python_ref_db,
]
```
|
=============================================================================================================================================
SOURCE CODE FILE: _masked.py
LINES: 1
SIZE: 46.53 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\opinfo\definitions\_masked.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import unittest
from collections.abc import Sequence
from functools import partial
import numpy as np
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_device_type import tol, toleranceOverride
from torch.testing._internal.common_dtype import (
all_types_and,
all_types_and_complex_and,
complex_types,
floating_and_complex_types_and,
floating_types_and,
integral_types,
)
from torch.testing._internal.opinfo.core import (
DecorateInfo,
gradcheck_wrapper_masked_operation,
gradcheck_wrapper_masked_pointwise_operation,
M,
OpInfo,
ReductionOpInfo,
S,
sample_inputs_reduction,
SampleInput,
)
from torch.testing._internal.opinfo.utils import prod_numpy, reference_reduction_numpy
# Used for log_softmax, softmax, softmin
def sample_inputs_softmax_variant(
op_info,
device,
dtype,
requires_grad,
with_dtype=False,
use_zero_dimensions=True,
**kwargs,
):
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
cases = [
((S,), (0,)),
((S, S), (0,)),
((S, S), (1,)),
((S, S), (-1,)),
((S, M, S), (2,)),
*([((S, 0, 0), (-1,))] if use_zero_dimensions else []),
]
kwargs = dict(dtype=torch.float64) if with_dtype else None
    # PyTorch on XLA throws an error when a dim argument is passed for a 0d tensor.
# See https://github.com/pytorch/xla/issues/3061 for more details.
if torch.device(device).type != "xla":
cases.append(((), (0,)))
return (
SampleInput(make_arg(shape), args=dim, kwargs=kwargs) for shape, dim in cases
)
def _generate_masked_op_mask(input_shape, device, **kwargs):
make_arg = partial(
make_tensor, dtype=torch.bool, device=device, requires_grad=False
)
yield None
yield make_arg(input_shape)
if len(input_shape) > 2:
# broadcast last mask dimension:
yield make_arg(input_shape[:-1] + (1,))
# broadcast middle mask dimension:
yield make_arg(input_shape[:1] + (1,) + input_shape[2:])
# broadcast first mask dimension:
yield make_arg((1,) + input_shape[1:])
# mask.ndim < input.ndim
yield make_arg(input_shape[1:])
# mask.ndim == 1
yield make_arg(input_shape[-1:])
    # masks that require broadcasting of inputs (mask.ndim >
    # input.ndim) will not be supported; however, we may
    # reconsider this if there is demand for this kind of
    # degenerate case.
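    # For example, with input_shape == (S, M, S) the masks yielded here have shapes
    # (S, M, S), (S, M, 1), (S, 1, S), (1, M, S), (M, S), and (S,), plus `None` for "no mask".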
def sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked reduction operators.
    A masked reduction operator is a reduction operator with a trailing
    optional mask argument. A mask is a bool tensor with the same
    shape as the input or a shape that is broadcastable to the input shape.
"""
kwargs["supports_multiple_dims"] = op_info.supports_multiple_dims
for sample_input in sample_inputs_reduction(
op_info, device, dtype, requires_grad, **kwargs
):
for mask in _generate_masked_op_mask(
sample_input.input.shape, device, **kwargs
):
sample_input_args, sample_input_kwargs = sample_input.args, dict(
mask=mask, **sample_input.kwargs
)
yield SampleInput(
sample_input.input.detach().requires_grad_(requires_grad),
args=sample_input_args,
kwargs=sample_input_kwargs,
)
if (
not requires_grad
and dtype.is_floating_point
and sample_input.input.ndim == 2
and mask is not None
and mask.shape == sample_input.input.shape
):
for v in [torch.inf, -torch.inf, torch.nan]:
t = sample_input.input.detach()
t.diagonal(0, -2, -1).fill_(v)
yield SampleInput(
t.requires_grad_(requires_grad),
args=sample_input_args,
kwargs=sample_input_kwargs,
)
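# A sample yielded above roughly corresponds to a call such as (illustrative only)
#   torch.masked.sum(x, dim, mask=mask)
# where `mask` is `None` or a bool tensor broadcastable to `x.shape`.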
def sample_inputs_sparse_coo_masked_reduction(
op_info, device, dtype, requires_grad, **kwargs
):
"""Sample inputs for masked reduction operators that support inputs
with sparse coo layouts.
"""
if op_info.supports_sparse:
op_name = op_info.name.replace("masked.", "")
for sample_input in sample_inputs_masked_reduction(
op_info, device, dtype, requires_grad, **kwargs
):
mask = sample_input.kwargs.get("mask")
if mask is not None:
sample_input_kwargs = sample_input.kwargs.copy()
sample_input_kwargs.update(mask=mask.to_sparse())
yield SampleInput(
sample_input.input.to_sparse(),
args=sample_input.args,
kwargs=sample_input_kwargs,
)
else:
if op_name in {"prod", "amax", "amin"}:
# FIXME: for now reductions with non-zero reduction identity and
# unspecified mask are not supported for sparse COO
# tensors, see torch.masked.prod implementation
# for details.
continue
yield SampleInput(
sample_input.input.to_sparse(),
args=sample_input.args,
kwargs=sample_input.kwargs,
)
def sample_inputs_sparse_csr_masked_reduction(
op_info, device, dtype, requires_grad, **kwargs
):
"""Sample inputs for masked reduction operators that support inputs
with sparse csr layouts.
"""
if op_info.supports_sparse_csr:
op_name = op_info.name.replace("masked.", "")
for sample_input in sample_inputs_masked_reduction(
op_info, device, dtype, requires_grad, **kwargs
):
if not (
sample_input.input.ndim == 2 and sample_input.kwargs.get("keepdim")
):
# - sparse CSR tensors are always 2-D tensors
                # - masked reductions on CSR tensors are defined only if keepdim is True.
continue
mask = sample_input.kwargs.get("mask")
if mask is not None:
sample_input_kwargs = sample_input.kwargs.copy()
sample_input_kwargs.update(mask=mask.to_sparse_csr())
new_sample = SampleInput(
sample_input.input.to_sparse_csr(),
args=sample_input.args,
kwargs=sample_input_kwargs,
)
else:
if op_name in ["prod", "amax", "amin", "mean"]:
# reductions with non-zero reduction identity and
                    # an unspecified mask are not supported for sparse CSR
# tensors, see torch.masked.prod implementation
# for details.
continue
new_sample = SampleInput(
sample_input.input.to_sparse_csr(),
args=sample_input.args,
kwargs=sample_input.kwargs,
)
yield new_sample
if sample_input.kwargs["dim"] == 0:
# Reductions of CSR tensors use different implementations for
                # inner and/or outer dimensions. So, as a minimum for testing the CSR
                # implementations, the following kwargs must be generated:
# dict(dim=0, keepdim=True)
# dict(dim=1, keepdim=True)
# dict(dim=(0, 1), keepdim=True)
# Here we generate the dim=1 case from the dim=0 case.
sample_input_kwargs = new_sample.kwargs.copy()
sample_input_kwargs.update(dim=1)
yield SampleInput(
new_sample.input.clone(),
args=sample_input.args,
kwargs=sample_input_kwargs,
)
def sample_inputs_masked_norm(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked norm."""
for ord in [2.0, 1, float("inf"), float("-inf"), 0]:
for sample_input in sample_inputs_masked_reduction(
op_info, device, dtype, requires_grad, **kwargs
):
sample_input_args, sample_input_kwargs = (
ord,
) + sample_input.args, sample_input.kwargs.copy()
yield SampleInput(
sample_input.input.clone().requires_grad_(requires_grad),
args=sample_input_args,
kwargs=sample_input_kwargs,
)
def reference_masked_std_var(
numpy_fn,
):
ref = reference_reduction_numpy(numpy_fn)
# Translate unbiased or correction arguments into ddof
def func(
input,
dim=None,
unbiased=None,
*,
correction=None,
**kwargs,
):
ddof = 1
if unbiased is not None:
ddof = 1 if unbiased else 0
if correction is not None:
ddof = correction
if isinstance(dim, Sequence):
dim = tuple(dim)
return ref(input, dim, ddof=ddof, **kwargs)
return func
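# Illustrative mapping performed above: `unbiased=False` -> ddof=0, `unbiased=True` (or
# neither argument given) -> ddof=1, and `correction=c` -> ddof=c, matching numpy's
# `ddof` convention in `np.var`/`np.std`.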
def sample_inputs_masked_std_var(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked std/var."""
kwargs["supports_multiple_dims"] = op_info.supports_multiple_dims
from torch.testing._internal.common_methods_invocations import sample_inputs_std_var
def masked_samples():
for sample_input in sample_inputs_std_var(
op_info, device, dtype, requires_grad, **kwargs
):
if len(sample_input.args) and isinstance(sample_input.args[0], bool):
continue # masked.{std, var} doesn't support `.var(unbiased)`
for mask in _generate_masked_op_mask(
sample_input.input.shape, device, **kwargs
):
sample_input_args, sample_input_kwargs = sample_input.args, dict(
mask=mask, **sample_input.kwargs
)
yield SampleInput(
sample_input.input.detach().requires_grad_(requires_grad),
args=sample_input_args,
kwargs=sample_input_kwargs,
)
if (
not requires_grad
and dtype.is_floating_point
and sample_input.input.ndim == 2
and mask is not None
and mask.shape == sample_input.input.shape
):
for v in [torch.inf, -torch.inf, torch.nan]:
t = sample_input.input.detach()
t.diagonal(0, -2, -1).fill_(v)
yield SampleInput(
t.requires_grad_(requires_grad),
args=sample_input_args,
kwargs=sample_input_kwargs,
)
for sample_input in masked_samples():
correction = sample_input.kwargs.get("correction")
if correction is None:
correction = int(sample_input.kwargs.get("unbiased", True))
dim = sample_input.kwargs.get("dim", None)
if sample_input.kwargs.get("mask") is None:
orig_count = torch.masked.sum(
torch.ones(sample_input.input.shape, dtype=torch.int64),
dim,
keepdim=True,
)
else:
inmask = torch.masked._input_mask(
sample_input.input, *sample_input.args, **sample_input.kwargs
)
orig_count = torch.masked.sum(
inmask.new_ones(sample_input.input.shape, dtype=torch.int64),
dim,
keepdim=True,
mask=inmask,
)
if orig_count.min() <= correction + 1:
# Skip samples that lead to nans in var computation
continue
yield sample_input
def sample_inputs_masked_softmax(
op_info, device, dtype, requires_grad, with_dtype=False, **kwargs
):
"""Sample inputs for masked softmax, log_softmax, and softmin.
    A masked normalization operator is a normalization operator with a
    trailing optional mask argument. A mask is a bool tensor with the
    same shape as the input or a shape that is broadcastable to the
    input shape.
"""
for sample_input in sample_inputs_softmax_variant(
op_info, device, dtype, requires_grad, with_dtype=with_dtype, **kwargs
):
for mask in _generate_masked_op_mask(
sample_input.input.shape, device, **kwargs
):
yield SampleInput(
sample_input.input.clone().requires_grad_(requires_grad),
*sample_input.args,
mask=mask,
**sample_input.kwargs,
)
def sample_inputs_masked_cumops(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked cumsum and cumprod."""
for sample_input in sample_inputs_softmax_variant(
op_info, device, dtype, requires_grad, **kwargs
):
for mask in _generate_masked_op_mask(
sample_input.input.shape, device, **kwargs
):
if type(mask) != torch.Tensor:
continue
sample_input_args, sample_input_kwargs = sample_input.args, dict(
mask=mask, **sample_input.kwargs
)
if "keepdim" in sample_input_kwargs:
sample_input_kwargs.pop("keepdim")
# dimension is required
if sample_input_args:
dim = sample_input.args[0]
else:
if "dim" not in sample_input_kwargs:
continue
dim = sample_input_kwargs.pop("dim")
sample_input_args = (dim,)
yield SampleInput(
sample_input.input.clone().requires_grad_(requires_grad),
*sample_input_args,
**sample_input_kwargs,
)
def sample_inputs_masked_logaddexp(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked logaddexp."""
shapes = [(S,), (S, S), (S, M, S)]
input_mask_lists = [
list(_generate_masked_op_mask(shape, device, **kwargs)) for shape in shapes
]
other_mask_lists = [
list(_generate_masked_op_mask(shape, device, **kwargs)) for shape in shapes
]
make_arg = partial(
make_tensor, dtype=dtype, device=device, requires_grad=requires_grad
)
for shape, input_masks, other_masks in zip(
shapes, input_mask_lists, other_mask_lists
):
for input_mask, other_mask in zip(input_masks, other_masks):
yield SampleInput(
make_arg(shape),
make_arg(shape),
input_mask=input_mask,
other_mask=other_mask,
)
def sample_inputs_masked_normalize(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked normalize."""
for ord in [2.0, 1, float("inf"), float("-inf"), 0]:
for sample_input in sample_inputs_softmax_variant(
op_info, device, dtype, requires_grad, use_zero_dimensions=False, **kwargs
):
yield SampleInput(
sample_input.input.clone().requires_grad_(requires_grad),
ord,
*sample_input.args,
**sample_input.kwargs,
)
op_db: list[OpInfo] = [
ReductionOpInfo(
"masked.sum",
ref=reference_reduction_numpy(np.sum),
method_variant=None,
identity=0,
nan_policy="propagate",
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
promotes_int_to_int64=True,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
DecorateInfo(
unittest.skip("Failing on some jobs"),
"TestReductions",
"test_reference_masked",
dtypes=(torch.bool, torch.int8, torch.int16, torch.int32),
),
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
# FIXME: sum reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"),
DecorateInfo(
unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim"
),
# RuntimeError: undefined value tensor
DecorateInfo(
unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
),
),
decorators=[
DecorateInfo(
toleranceOverride(
{
torch.bfloat16: tol(atol=1e-03, rtol=5e-2),
torch.float16: tol(atol=1e-03, rtol=5e-3),
}
),
"TestReductions",
"test_reference_masked",
),
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}),
"TestReductions",
"test_ref_small_input",
),
DecorateInfo(
toleranceOverride(
{
torch.bfloat16: tol(atol=0.1, rtol=0.1),
torch.float16: tol(atol=5e-3, rtol=5e-3),
}
),
"TestMasked",
"test_mask_layout",
),
],
sample_inputs_func=sample_inputs_masked_reduction,
sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction,
sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction,
),
ReductionOpInfo(
"masked.prod",
ref=prod_numpy,
method_variant=None,
identity=1,
nan_policy="propagate",
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
promotes_int_to_int64=True,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
DecorateInfo(
unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
),
DecorateInfo(
unittest.skip("Failing on some jobs"),
"TestReductions",
"test_reference_masked",
dtypes=(torch.bool, torch.int8, torch.int16, torch.int32),
),
DecorateInfo(
"TestReductions",
"test_ref_small_input",
dtypes=(torch.int8, torch.int16, torch.int32),
),
# FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs)
DecorateInfo(
unittest.skip("Skipped!"),
"TestMasked",
"test_mask_layout",
device_type="cuda",
dtypes=(torch.bool, *integral_types(), *complex_types()),
),
),
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-02)}),
"TestReductions",
"test_reference_masked",
),
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),
"TestReductions",
"test_ref_duplicate_values",
),
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),
"TestReductions",
"test_ref_small_input",
),
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1.5e-03)}),
"TestMasked",
"test_mask_layout",
device_type="cpu",
),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-05)}),
"TestOperators",
"test_jvp",
device_type="cuda",
),
],
sample_inputs_func=sample_inputs_masked_reduction,
sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction,
sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction,
),
OpInfo(
"masked.cumsum",
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
method_variant=None,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
# NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults
DecorateInfo(
unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"
),
),
# Can reuse the same inputs; dim is required in both
sample_inputs_func=sample_inputs_masked_cumops,
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
),
OpInfo(
"masked.cumprod",
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
method_variant=None,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
# NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults
DecorateInfo(
unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"
),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}),
"TestCompositeCompliance",
"test_backward",
device_type="cuda",
),
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-2, rtol=2.6e-3)}),
"TestInductorOpInfo",
"test_comprehensive",
device_type="cuda",
),
),
# Can reuse the same inputs; dim is required in both
sample_inputs_func=sample_inputs_masked_cumops,
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
),
ReductionOpInfo(
"masked.amax",
nan_policy="propagate",
supports_out=False,
dtypes=all_types_and(torch.float16, torch.bfloat16),
supports_sparse=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse_csr=True,
ref=reference_reduction_numpy(np.amax),
skips=(
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
# FIXME: amax reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"),
DecorateInfo(
unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim"
),
# RuntimeError: Unknown builtin op: aten::iinfo
DecorateInfo(
unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"
),
# FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs)
# FIXME: "_segment_reduce_lengths_cpu/cuda" not implemented for ... (used for sparse_csr inputs)
DecorateInfo(
unittest.skip("Skipped!"),
"TestMasked",
"test_mask_layout",
dtypes=(torch.bool, *integral_types(), *complex_types()),
),
),
sample_inputs_func=sample_inputs_masked_reduction,
sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction,
sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction,
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
),
ReductionOpInfo(
"masked.amin",
nan_policy="propagate",
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=all_types_and(torch.float16, torch.bfloat16),
supports_sparse=True,
supports_sparse_csr=True,
ref=reference_reduction_numpy(np.amin),
skips=(
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
            # FIXME: amin reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"),
DecorateInfo(
unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim"
),
# RuntimeError: Unknown builtin op: aten::iinfo
DecorateInfo(
unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
),
# FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs)
# FIXME: "_segment_reduce_lengths_cpu/cuda" not implemented for ... (used for sparse_csr inputs)
DecorateInfo(
unittest.skip("Skipped!"),
"TestMasked",
"test_mask_layout",
dtypes=(torch.bool, *integral_types(), *complex_types()),
),
),
sample_inputs_func=sample_inputs_masked_reduction,
sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction,
sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction,
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
),
ReductionOpInfo(
"masked.argmax",
supports_out=False,
supports_multiple_dims=False,
supports_autograd=False,
dtypes=all_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.argmax, supports_keepdims=False),
skips=(
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
# initial is not a keyword for argmax
DecorateInfo(
unittest.expectedFailure, "TestReductions", "test_reference_masked"
),
# NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults
DecorateInfo(
unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
),
),
sample_inputs_func=sample_inputs_masked_reduction,
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
),
ReductionOpInfo(
"masked.argmin",
supports_out=False,
supports_multiple_dims=False,
supports_autograd=False,
dtypes=all_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.argmin, supports_keepdims=False),
skips=(
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
# initial is not a keyword for argmin
DecorateInfo(
unittest.expectedFailure, "TestReductions", "test_reference_masked"
),
# NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults
DecorateInfo(
unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
),
),
sample_inputs_func=sample_inputs_masked_reduction,
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
),
ReductionOpInfo(
"masked.mean",
ref=reference_reduction_numpy(np.mean)
if np.lib.NumpyVersion(np.__version__) >= "1.20.2"
else None,
method_variant=None,
nan_policy="propagate",
supports_out=False,
supports_sparse_csr=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
skips=(
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
# FIXME: sum reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"),
DecorateInfo(
unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim"
),
# RuntimeError: undefined value tensor
DecorateInfo(
unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
),
# FIXME: "_segment_reduce_lengths_cpu/cuda" not implemented for ... (used for sparse_csr inputs)
DecorateInfo(
unittest.skip("Skipped!"),
"TestMasked",
"test_mask_layout",
dtypes=(torch.bool, *integral_types(), *complex_types()),
),
),
decorators=[
DecorateInfo(
toleranceOverride(
{
torch.bfloat16: tol(atol=1e-03, rtol=0.05),
torch.float16: tol(atol=1e-03, rtol=1e-03),
}
),
"TestReductions",
"test_reference_masked",
),
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),
"TestReductions",
"test_ref_small_input",
),
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-03, rtol=2e-03)}),
"TestSparseCompressed",
"test_consistency",
device_type="cuda",
),
],
sample_inputs_func=sample_inputs_masked_reduction,
sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction,
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
),
OpInfo(
"masked.median",
dtypes=floating_types_and(torch.bfloat16, torch.float16),
method_variant=None,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
# NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults
DecorateInfo(
unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"
),
),
sample_inputs_func=partial(
sample_inputs_masked_softmax, use_zero_dimensions=False
),
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
),
ReductionOpInfo(
"masked.norm",
identity=0,
method_variant=None,
nan_policy="propagate",
supports_out=False,
promotes_int_to_float=True,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
skips=(
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
# FIXME: sum reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"),
DecorateInfo(
unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim"
),
# torch.jit.frontend.NotSupportedError: Compiled functions
# can't take variable number of arguments or use
# keyword-only arguments with defaults
DecorateInfo(
unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
),
),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_masked_norm,
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
),
ReductionOpInfo(
"masked.var",
ref=reference_masked_std_var(np.var)
if np.lib.NumpyVersion(np.__version__) >= "1.20.2"
else None,
method_variant=None,
nan_policy="propagate",
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
promotes_int_to_float=True,
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
"TestSchemaCheckModeOpInfo",
"test_schema_correctness",
dtypes=(torch.complex64, torch.complex128),
),
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
# FIXME: sum reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"),
DecorateInfo(
unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim"
),
# RuntimeError: undefined value tensor
DecorateInfo(
unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
),
),
decorators=[
DecorateInfo(
toleranceOverride(
{
torch.float16: tol(atol=1e-02, rtol=1e-02),
torch.bfloat16: tol(atol=1e-03, rtol=1e-03),
}
),
"TestReductions",
"test_reference_masked",
),
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
"TestReductions",
"test_ref_small_input",
),
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
"TestMasked",
"test_reference_masked",
),
DecorateInfo(
toleranceOverride(
{
torch.float16: tol(atol=1e-02, rtol=1e-02),
torch.bfloat16: tol(atol=1e-03, rtol=1e-03),
}
),
"TestMasked",
"test_reference_masked",
),
DecorateInfo(
toleranceOverride(
{
torch.float16: tol(atol=4e-5, rtol=2e-2),
}
),
"TestInductorOpInfo",
"test_comprehensive",
device_type="cuda",
),
],
sample_inputs_func=sample_inputs_masked_std_var,
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
check_batched_grad=True,
),
ReductionOpInfo(
"masked.std",
ref=reference_masked_std_var(np.std)
if np.lib.NumpyVersion(np.__version__) >= "1.20.2"
else None,
method_variant=None,
nan_policy="propagate",
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
promotes_int_to_float=True,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
"TestSchemaCheckModeOpInfo",
"test_schema_correctness",
dtypes=(torch.complex64, torch.complex128),
),
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
# FIXME: sum reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"),
DecorateInfo(
unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim"
),
# RuntimeError: undefined value tensor
DecorateInfo(
unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
),
),
decorators=[
DecorateInfo(
toleranceOverride(
{
torch.bfloat16: tol(atol=1e-02, rtol=1e-02),
torch.float16: tol(atol=1e-02, rtol=1e-02),
}
),
"TestReductions",
"test_reference_masked",
),
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
"TestReductions",
"test_ref_small_input",
),
DecorateInfo(
toleranceOverride(
{
torch.float16: tol(atol=1e-02, rtol=1e-02),
torch.bfloat16: tol(atol=5e-03, rtol=5e-04),
}
),
"TestMasked",
"test_reference_masked",
),
],
sample_inputs_func=sample_inputs_masked_std_var,
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
check_batched_grad=True,
),
OpInfo(
"masked.softmax",
method_variant=None,
dtypes=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_softmax,
skips=(
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
DecorateInfo(
unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
),
),
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
),
OpInfo(
"masked.log_softmax",
method_variant=None,
dtypes=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_softmax,
skips=(
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
DecorateInfo(
unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
),
),
decorators=[
DecorateInfo(
toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1e-02)}),
"TestMasked",
"test_reference_masked",
),
],
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
),
OpInfo(
"masked.softmin",
method_variant=None,
dtypes=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_softmax,
skips=(
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
DecorateInfo(
unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
),
# FIXME:
# Mismatched elements: 2 / 2 (100.0%)
# Greatest absolute difference: nan at index (0,) (up to 0.0001 allowed)
            # Greatest relative difference: nan at index (0,) (up to 0.0001 allowed)
DecorateInfo(
unittest.skip("Skipped!"),
"TestOperators",
"test_vmapvjpvjp",
device_type="cpu",
),
),
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
),
OpInfo(
"masked.normalize",
method_variant=None,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_normalize,
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=2e-5, rtol=6e-3)}),
"TestInductorOpInfo",
"test_comprehensive",
device_type="cuda",
),
],
skips=(
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
DecorateInfo(
unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
),
),
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
),
OpInfo(
"masked.logaddexp",
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
skips=(
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
# NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults
DecorateInfo(
unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"
),
DecorateInfo(
unittest.skip("Skipped!"), "TestFwdGradients", "test_fn_gradgrad"
),
DecorateInfo(
unittest.skip("Skipped!"), "TestBwdGradients", "test_fn_gradgrad"
),
),
sample_inputs_func=sample_inputs_masked_logaddexp,
gradcheck_wrapper=gradcheck_wrapper_masked_pointwise_operation,
),
ReductionOpInfo(
"masked.logsumexp",
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
method_variant=None,
nan_policy="propagate",
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
# FIXME: reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), "TestReductions", "test_dim_empty"),
DecorateInfo(
unittest.skip("Skipped!"), "TestReductions", "test_dim_empty_keepdim"
),
# Identity can't be -torch.inf without overflow
DecorateInfo(
unittest.skip("Skipped!"),
"TestReductions",
"test_empty_tensor_empty_slice",
),
# NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults
DecorateInfo(
unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"
),
# all the values are the same except for -inf vs nan
DecorateInfo(unittest.skip("Skipped!"), "TestDecomp", "test_comprehensive"),
# FIXME:
# Mismatched elements: 2 / 12 (16.7%)
# Greatest absolute difference: 9223372034707292160 at index (0, 0, 0, 0)
# Greatest relative difference: 0.0 at index (0, 0, 0, 1)
DecorateInfo(
unittest.skip("Skipped!"),
"TestInductorOpInfo",
"test_comprehensive",
device_type="cpu",
),
),
sample_inputs_func=sample_inputs_masked_reduction,
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
),
]
```
|
=========================================================================================================================================
SOURCE CODE FILE: fft.py
LINES: 1
SIZE: 29.54 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\opinfo\definitions\fft.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import unittest
from functools import partial
import numpy as np
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import SM53OrLater
from torch.testing._internal.common_device_type import precisionOverride
from torch.testing._internal.common_dtype import (
all_types_and,
all_types_and_complex_and,
)
from torch.testing._internal.common_utils import TEST_SCIPY, TEST_WITH_ROCM
from torch.testing._internal.opinfo.core import (
DecorateInfo,
ErrorInput,
OpInfo,
sample_inputs_spectral_ops,
SampleInput,
SpectralFuncInfo,
SpectralFuncType,
)
from torch.testing._internal.opinfo.refs import (
_find_referenced_opinfo,
_inherit_constructor_args,
PythonRefInfo,
)
has_scipy_fft = False
if TEST_SCIPY:
try:
import scipy.fft
has_scipy_fft = True
except ModuleNotFoundError:
pass
class SpectralFuncPythonRefInfo(SpectralFuncInfo):
"""
    An OpInfo for a Python reference of a spectral function operation.
"""
def __init__(
self,
        name,  # the string name of the callable Python reference
*,
op=None, # the function variant of the operation, populated as torch.<name> if None
torch_opinfo_name, # the string name of the corresponding torch opinfo
torch_opinfo_variant="",
**kwargs,
): # additional kwargs override kwargs inherited from the torch opinfo
self.torch_opinfo_name = torch_opinfo_name
self.torch_opinfo = _find_referenced_opinfo(
torch_opinfo_name, torch_opinfo_variant, op_db=op_db
)
assert isinstance(self.torch_opinfo, SpectralFuncInfo)
inherited = self.torch_opinfo._original_spectral_func_args
ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
super().__init__(**ukwargs)
def error_inputs_fft(op_info, device, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=torch.float32)
# Zero-dimensional tensor has no dimension to take FFT of
yield ErrorInput(
SampleInput(make_arg()),
error_type=IndexError,
error_regex="Dimension specified as -1 but tensor has no dimensions",
)
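# The ErrorInput above encodes the expectation that e.g. torch.fft.fft(torch.tensor(1.0))
# raises an IndexError matching "Dimension specified as -1 but tensor has no dimensions".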
def error_inputs_fftn(op_info, device, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=torch.float32)
# Specifying a dimension on a zero-dimensional tensor
yield ErrorInput(
SampleInput(make_arg(), dim=(0,)),
error_type=IndexError,
error_regex="Dimension specified as 0 but tensor has no dimensions",
)
def sample_inputs_fft_with_min(
op_info, device, dtype, requires_grad=False, *, min_size, **kwargs
):
yield from sample_inputs_spectral_ops(
op_info, device, dtype, requires_grad, **kwargs
)
if TEST_WITH_ROCM:
# FIXME: Causes floating point exception on ROCm
return
# Check the "Invalid number of data points" error isn't too strict
# https://github.com/pytorch/pytorch/pull/109083
a = make_tensor(min_size, dtype=dtype, device=device, requires_grad=requires_grad)
yield SampleInput(a)
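# For example, `fft.hfft` below uses `min_size=2`, so a plain length-2 1-D tensor is added
# here as (presumably) the smallest input the op should accept without raising
# "Invalid number of data points".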
def sample_inputs_fftshift(op_info, device, dtype, requires_grad, **kwargs):
def mt(shape, **kwargs):
return make_tensor(
shape, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
yield SampleInput(mt((9, 10)))
yield SampleInput(mt((50,)), kwargs=dict(dim=0))
yield SampleInput(mt((5, 11)), kwargs=dict(dim=(1,)))
yield SampleInput(mt((5, 6)), kwargs=dict(dim=(0, 1)))
yield SampleInput(mt((5, 6, 2)), kwargs=dict(dim=(0, 2)))
# Operator database
op_db: list[OpInfo] = [
SpectralFuncInfo(
"fft.fft",
aten_name="fft_fft",
decomp_aten_name="_fft_c2c",
ref=np.fft.fft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=1),
error_inputs_func=error_inputs_fft,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
),
SpectralFuncInfo(
"fft.fft2",
aten_name="fft_fft2",
ref=np.fft.fft2,
decomp_aten_name="_fft_c2c",
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
decorators=[precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})],
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_complex_half_reference_testing",
device_type="cuda",
dtypes=[torch.complex32],
active_if=TEST_WITH_ROCM,
),
),
),
SpectralFuncInfo(
"fft.fftn",
aten_name="fft_fftn",
decomp_aten_name="_fft_c2c",
ref=np.fft.fftn,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
decorators=[precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})],
),
SpectralFuncInfo(
"fft.hfft",
aten_name="fft_hfft",
decomp_aten_name="_fft_c2r",
ref=np.fft.hfft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=2),
error_inputs_func=error_inputs_fft,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
check_batched_gradgrad=False,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
"TestSchemaCheckModeOpInfo",
"test_schema_correctness",
dtypes=(torch.complex64, torch.complex128),
),
),
),
SpectralFuncInfo(
"fft.hfft2",
aten_name="fft_hfft2",
decomp_aten_name="_fft_c2r",
ref=scipy.fft.hfft2 if has_scipy_fft else None,
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(2, 2)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_gradgrad=False,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}),
"TestFFT",
"test_reference_nd",
),
],
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
"TestSchemaCheckModeOpInfo",
"test_schema_correctness",
),
# FIXME: errors are too large; needs investigation
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_complex_half_reference_testing",
device_type="cuda",
),
),
),
SpectralFuncInfo(
"fft.hfftn",
aten_name="fft_hfftn",
decomp_aten_name="_fft_c2r",
ref=scipy.fft.hfftn if has_scipy_fft else None,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(2, 2)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_gradgrad=False,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}),
"TestFFT",
"test_reference_nd",
),
],
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
"TestSchemaCheckModeOpInfo",
"test_schema_correctness",
),
),
),
SpectralFuncInfo(
"fft.rfft",
aten_name="fft_rfft",
decomp_aten_name="_fft_r2c",
ref=np.fft.rfft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and(
torch.bool, *(() if (not SM53OrLater) else (torch.half,))
),
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=1),
error_inputs_func=error_inputs_fft,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_grad=False,
skips=(),
check_batched_gradgrad=False,
),
SpectralFuncInfo(
"fft.rfft2",
aten_name="fft_rfft2",
decomp_aten_name="_fft_r2c",
ref=np.fft.rfft2,
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and(
torch.bool, *(() if (not SM53OrLater) else (torch.half,))
),
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[
precisionOverride({torch.float: 1e-4}),
],
),
SpectralFuncInfo(
"fft.rfftn",
aten_name="fft_rfftn",
decomp_aten_name="_fft_r2c",
ref=np.fft.rfftn,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and(
torch.bool, *(() if (not SM53OrLater) else (torch.half,))
),
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[
precisionOverride({torch.float: 1e-4}),
],
),
SpectralFuncInfo(
"fft.ifft",
aten_name="fft_ifft",
decomp_aten_name="_fft_c2c",
ref=np.fft.ifft,
ndimensional=SpectralFuncType.OneD,
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=1),
error_inputs_func=error_inputs_fft,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
),
SpectralFuncInfo(
"fft.ifft2",
aten_name="fft_ifft2",
decomp_aten_name="_fft_c2c",
ref=np.fft.ifft2,
ndimensional=SpectralFuncType.TwoD,
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncInfo(
"fft.ifftn",
aten_name="fft_ifftn",
decomp_aten_name="_fft_c2c",
ref=np.fft.ifftn,
ndimensional=SpectralFuncType.ND,
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncInfo(
"fft.ihfft",
aten_name="fft_ihfft",
decomp_aten_name="_fft_r2c",
ref=np.fft.ihfft,
ndimensional=SpectralFuncType.OneD,
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
error_inputs_func=error_inputs_fft,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and(
torch.bool, *(() if (not SM53OrLater) else (torch.half,))
),
skips=(),
check_batched_grad=False,
),
SpectralFuncInfo(
"fft.ihfft2",
aten_name="fft_ihfft2",
decomp_aten_name="_fft_r2c",
ref=scipy.fft.ihfftn if has_scipy_fft else None,
ndimensional=SpectralFuncType.TwoD,
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and(
torch.bool, *(() if (not SM53OrLater) else (torch.half,))
),
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=(
# The values for attribute 'shape' do not match: torch.Size([5, 6, 5]) != torch.Size([5, 6, 6]).
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_warning"),
DecorateInfo(
precisionOverride({torch.float: 2e-4}), "TestFFT", "test_reference_nd"
),
# Mismatched elements!
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out"),
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_warnings"),
),
),
SpectralFuncInfo(
"fft.ihfftn",
aten_name="fft_ihfftn",
decomp_aten_name="_fft_r2c",
ref=scipy.fft.ihfftn if has_scipy_fft else None,
ndimensional=SpectralFuncType.ND,
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and(torch.bool),
        # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and(
torch.bool, *(() if (not SM53OrLater) else (torch.half,))
),
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[
# The values for attribute 'shape' do not match: torch.Size([5, 6, 5]) != torch.Size([5, 6, 6]).
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_warning"),
# Mismatched elements!
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out"),
DecorateInfo(
precisionOverride({torch.float: 2e-4}), "TestFFT", "test_reference_nd"
),
],
),
SpectralFuncInfo(
"fft.irfft",
aten_name="fft_irfft",
decomp_aten_name="_fft_c2r",
ref=np.fft.irfft,
ndimensional=SpectralFuncType.OneD,
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 2)),
error_inputs_func=error_inputs_fft,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
check_batched_gradgrad=False,
),
SpectralFuncInfo(
"fft.irfft2",
aten_name="fft_irfft2",
decomp_aten_name="_fft_c2r",
ref=np.fft.irfft2,
ndimensional=SpectralFuncType.TwoD,
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 2)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncInfo(
"fft.irfftn",
aten_name="fft_irfftn",
decomp_aten_name="_fft_c2r",
ref=np.fft.irfftn,
ndimensional=SpectralFuncType.ND,
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 2)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
OpInfo(
"fft.fftshift",
dtypes=all_types_and_complex_and(
torch.bool, torch.bfloat16, torch.half, torch.chalf
),
sample_inputs_func=sample_inputs_fftshift,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
OpInfo(
"fft.ifftshift",
dtypes=all_types_and_complex_and(
torch.bool, torch.bfloat16, torch.half, torch.chalf
),
sample_inputs_func=sample_inputs_fftshift,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
]
python_ref_db: list[OpInfo] = [
SpectralFuncPythonRefInfo(
"_refs.fft.fft",
torch_opinfo_name="fft.fft",
),
SpectralFuncPythonRefInfo(
"_refs.fft.ifft",
torch_opinfo_name="fft.ifft",
),
SpectralFuncPythonRefInfo(
"_refs.fft.rfft",
torch_opinfo_name="fft.rfft",
),
SpectralFuncPythonRefInfo(
"_refs.fft.irfft",
torch_opinfo_name="fft.irfft",
),
SpectralFuncPythonRefInfo(
"_refs.fft.hfft",
torch_opinfo_name="fft.hfft",
),
SpectralFuncPythonRefInfo(
"_refs.fft.ihfft",
torch_opinfo_name="fft.ihfft",
),
SpectralFuncPythonRefInfo(
"_refs.fft.fftn",
torch_opinfo_name="fft.fftn",
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncPythonRefInfo(
"_refs.fft.ifftn",
torch_opinfo_name="fft.ifftn",
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncPythonRefInfo(
"_refs.fft.rfftn",
torch_opinfo_name="fft.rfftn",
),
SpectralFuncPythonRefInfo(
"_refs.fft.irfftn",
torch_opinfo_name="fft.irfftn",
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncPythonRefInfo(
"_refs.fft.hfftn",
torch_opinfo_name="fft.hfftn",
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncPythonRefInfo(
"_refs.fft.ihfftn",
torch_opinfo_name="fft.ihfftn",
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4}),
"TestFFT",
"test_reference_nd",
),
# AssertionError: Reference result was farther (0.09746177145360499) from the precise
# computation than the torch result was (0.09111555632069855)
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_python_ref_torch_fallback",
dtypes=(torch.float16,),
device_type="cuda",
),
# AssertionError: Reference result was farther (0.0953431016138116) from the precise
# computation than the torch result was (0.09305490684430734)
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_python_ref_executor",
dtypes=(torch.float16,),
device_type="cuda",
),
],
),
SpectralFuncPythonRefInfo(
"_refs.fft.fft2",
torch_opinfo_name="fft.fft2",
),
SpectralFuncPythonRefInfo(
"_refs.fft.ifft2",
torch_opinfo_name="fft.ifft2",
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncPythonRefInfo(
"_refs.fft.rfft2",
torch_opinfo_name="fft.rfft2",
),
SpectralFuncPythonRefInfo(
"_refs.fft.irfft2",
torch_opinfo_name="fft.irfft2",
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncPythonRefInfo(
"_refs.fft.hfft2",
torch_opinfo_name="fft.hfft2",
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncPythonRefInfo(
"_refs.fft.ihfft2",
torch_opinfo_name="fft.ihfft2",
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4}),
"TestFFT",
"test_reference_nd",
),
# FIXME:
# Reference result was farther (0.0953431016138116) from the precise computation
# than the torch result was (0.09305490684430734)!
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_python_ref_executor",
device_type="cuda",
),
],
),
PythonRefInfo(
"_refs.fft.fftshift",
op_db=op_db,
torch_opinfo_name="fft.fftshift",
),
PythonRefInfo(
"_refs.fft.ifftshift",
op_db=op_db,
torch_opinfo_name="fft.ifftshift",
),
]
```
|
============================================================================================================================================
SOURCE CODE FILE: linalg.py
LINES: 1
SIZE: 84.39 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\opinfo\definitions\linalg.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import itertools
import random
import unittest
from collections.abc import Iterable
from functools import partial
from itertools import chain, product
import numpy as np
from numpy import inf
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import (
_get_magma_version,
_get_torch_cuda_version,
with_tf32_off,
)
from torch.testing._internal.common_device_type import (
has_cusolver,
skipCPUIfNoLapack,
skipCUDAIf,
skipCUDAIfNoCusolver,
skipCUDAIfNoMagma,
skipCUDAIfNoMagmaAndNoCusolver,
skipCUDAIfNoMagmaAndNoLinalgsolver,
skipCUDAIfRocm,
tol,
toleranceOverride,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex,
all_types_and_complex_and,
floating_and_complex_types,
floating_and_complex_types_and,
)
from torch.testing._internal.common_utils import (
GRADCHECK_NONDET_TOL,
make_fullrank_matrices_with_distinct_singular_values,
skipIfSlowGradcheckEnv,
slowTest,
TEST_WITH_ROCM,
)
from torch.testing._internal.opinfo.core import (
clone_sample,
DecorateInfo,
ErrorInput,
gradcheck_wrapper_hermitian_input,
L,
M,
OpInfo,
ReductionOpInfo,
S,
SampleInput,
)
from torch.testing._internal.opinfo.refs import PythonRefInfo, ReductionPythonRefInfo
def sample_kwargs_vector_norm(t, **kwargs):
# orders with / without identity
def ords():
has_id = (6, 4, 2, 1, 0, 0.9)
no_id = (inf, -2.1, -inf)
if t.numel() == 0:
dim = kwargs.get("dim")
if dim is None:
return has_id
if not isinstance(dim, Iterable):
dim = (dim,)
for d in dim:
if t.size(d) == 0:
return has_id
return has_id + no_id
return (((), dict(ord=o)) for o in ords())
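# Illustrative sketch (not part of the OpInfo database; the _example_* helper below is
# hypothetical): orders without an identity, such as inf, are rejected on empty
# tensors, which is exactly the case ords() filters out above.
def _example_vector_norm_identity():
    t = torch.empty(0)
    # ord=2 has identity 0, so the empty reduction is well defined
    assert torch.linalg.vector_norm(t, ord=2) == 0
    try:
        torch.linalg.vector_norm(t, ord=inf)  # no identity -> RuntimeError
    except RuntimeError:
        pass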
def sample_inputs_svd(op_info, device, dtype, requires_grad=False, **kwargs):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(
make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad
)
is_linalg_svd = "linalg.svd" in op_info.name
batches = [(), (0,), (3,)]
ns = [0, 3, 5]
def uniformize(usv):
S = usv[1]
k = S.shape[-1]
U = usv[0][..., :k]
Vh = usv[2] if is_linalg_svd else usv[2].mH
Vh = Vh[..., :k, :]
return U, S, Vh
def fn_U(usv):
U, _, _ = uniformize(usv)
return U.abs()
def fn_S(usv):
return uniformize(usv)[1]
def fn_Vh(usv):
# We also return S to test
_, S, Vh = uniformize(usv)
return S, Vh.abs()
def fn_UVh(usv):
U, S, Vh = uniformize(usv)
return U @ Vh, S
fns = (fn_U, fn_S, fn_Vh, fn_UVh)
fullmat = "full_matrices" if is_linalg_svd else "some"
for batch, n, k, fullmat_val, fn in product(batches, ns, ns, (True, False), fns):
shape = batch + (n, k)
yield SampleInput(
make_arg(*shape), kwargs={fullmat: fullmat_val}, output_process_fn_grad=fn
)
def sample_inputs_cross(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(
make_tensor, dtype=dtype, device=device, requires_grad=requires_grad
)
yield SampleInput(make_arg((S, 3)), args=(make_arg((S, 3)),))
yield SampleInput(
make_arg((S, 3, S)), args=(make_arg((S, 3, S)),), kwargs=dict(dim=1)
)
yield SampleInput(make_arg((1, 3)), args=(make_arg((S, 3)),), kwargs=dict(dim=-1))
def error_inputs_cross(op_info, device, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=torch.float32)
sample = SampleInput(input=make_arg((S, 3)), args=(make_arg((S, 1)),))
err = "inputs dimension -1 must have length 3"
yield ErrorInput(sample, error_regex=err, error_type=RuntimeError)
sample = SampleInput(input=make_arg((5, S, 3)), args=(make_arg((S, 3)),))
err = "inputs must have the same number of dimensions"
yield ErrorInput(sample, error_regex=err, error_type=RuntimeError)
sample = SampleInput(input=make_arg((S, 2)), args=(make_arg((S, 2)),))
err = "must have length 3"
yield ErrorInput(sample, error_regex=err, error_type=RuntimeError)
sample = SampleInput(
input=make_arg((S, 2)), args=(make_arg((S, 2)),), kwargs=dict(dim=2)
)
err = "Dimension out of range"
yield ErrorInput(sample, error_regex=err, error_type=IndexError)
def sample_inputs_householder_product(op_info, device, dtype, requires_grad, **kwargs):
"""
This function generates input for torch.linalg.householder_product (torch.orgqr).
The first argument should be a square matrix or batch of square matrices, the second argument is a vector or batch of vectors.
Empty, square, rectangular, batched square and batched rectangular input is generated.
"""
make_arg = partial(
make_tensor,
device=device,
dtype=dtype,
requires_grad=requires_grad,
low=-2,
high=2,
)
# Each column of the matrix is getting multiplied many times leading to very large values for
# the Jacobian matrix entries and making the finite-difference result of grad check less accurate.
# That's why gradcheck with the default range [-9, 9] fails and [-2, 2] is used here.
yield SampleInput(make_arg((S, S)), make_arg((S,)))
yield SampleInput(make_arg((S + 1, S)), make_arg((S,)))
yield SampleInput(make_arg((2, 1, S, S)), make_arg((2, 1, S)))
yield SampleInput(make_arg((2, 1, S + 1, S)), make_arg((2, 1, S)))
yield SampleInput(
make_arg((0, 0), low=None, high=None),
make_arg((0,), low=None, high=None),
)
yield SampleInput(make_arg((S, S)), make_arg((0,), low=None, high=None))
# m = n = S, k = S - 2
yield SampleInput(make_arg((S, S)), make_arg((S - 2,), low=None, high=None))
# m = S, n = S -1, k = S - 2
yield SampleInput(make_arg((S, S - 1)), make_arg((S - 2,), low=None, high=None))
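# Illustrative sketch (not part of the OpInfo database; _example_householder_product_roundtrip
# is a hypothetical helper): the operator sampled above rebuilds the Q factor from the
# Householder reflectors returned by torch.geqrf.
def _example_householder_product_roundtrip():
    A = torch.randn(5, 3, dtype=torch.float64)
    reflectors, tau = torch.geqrf(A)
    Q = torch.linalg.householder_product(reflectors, tau)
    # Q has orthonormal columns, so Q^H Q is (numerically) the identity
    assert torch.allclose(Q.mH @ Q, torch.eye(3, dtype=torch.float64), atol=1e-10)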
def sample_inputs_linalg_matrix_power(op_info, device, dtype, requires_grad, **kwargs):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(
make_tensor, dtype=dtype, device=device, requires_grad=requires_grad
)
make_arg_fullrank = partial(
make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad
)
# (<matrix_size>, (<batch_sizes, ...>))
test_sizes = [
(1, ()),
(2, (0,)),
(2, (2,)),
]
for matrix_size, batch_sizes in test_sizes:
size = batch_sizes + (matrix_size, matrix_size)
for n in (0, 3, 5):
yield SampleInput(make_arg(size), args=(n,))
for n in [-4, -2, -1]:
yield SampleInput(make_arg_fullrank(*size), args=(n,))
def sample_inputs_linalg_det_logdet_slogdet(
op_info, device, dtype, requires_grad, **kwargs
):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(
make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad
)
batches = [(), (0,), (3,)]
ns = [0, 1, 5]
is_logdet = op_info.name == "logdet"
for (
batch,
n,
) in product(batches, ns):
shape = batch + (n, n)
A = make_arg(*shape)
        # Need to make the matrices in A have positive determinant for autograd
        # To do so, we multiply A by the sign of its determinant; for the odd sizes
        # used here this flips a negative determinant to a positive one
if is_logdet and not A.is_complex() and A.numel() > 0:
s = torch.linalg.slogdet(A).sign
A = A * s.unsqueeze(-1).unsqueeze(-1)
A.requires_grad_(requires_grad)
yield SampleInput(A)
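# Illustrative sketch (not part of the OpInfo database; _example_flip_determinant_sign
# is a hypothetical helper): for an odd-sized matrix, scaling by the sign of the
# determinant makes the determinant positive, which is what the loop above relies on.
def _example_flip_determinant_sign():
    A = torch.randn(5, 5, dtype=torch.float64)
    s = torch.linalg.slogdet(A).sign
    assert torch.linalg.det(A * s) > 0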
def sample_inputs_lu_solve(op_info, device, dtype, requires_grad=False, **kwargs):
"""Samples the inputs for both linalg.lu_solve and lu_solve"""
make_fn = make_fullrank_matrices_with_distinct_singular_values
make_a = partial(make_fn, dtype=dtype, device=device)
make_b = partial(make_tensor, dtype=dtype, device=device)
def clone(X, requires_grad):
Y = X.clone()
Y.requires_grad_(requires_grad)
return Y
is_linalg_lu_solve = op_info.name == "linalg.lu_solve"
batches = ((), (0,), (2,))
ns = (3, 1, 0)
nrhs = (4, 1, 0)
for n, batch, rhs in product(ns, batches, nrhs):
A = make_a(*(batch + (n, n)))
LU, pivots = torch.linalg.lu_factor(A)
B = make_b(batch + (n, rhs))
grads = (False,) if not requires_grad else (True, False)
# we try all possible combinations of requires_grad for each input
for LU_grad, B_grad in product(grads, grads):
# when requires_grad == True, at least one input has to have requires_grad enabled
if requires_grad and not LU_grad and not B_grad:
continue
if is_linalg_lu_solve:
for adjoint, left in product((True, False), repeat=2):
yield SampleInput(
clone(LU, LU_grad),
args=(pivots, clone(B if left else B.mT, B_grad)),
kwargs=dict(adjoint=adjoint, left=left),
)
else:
yield SampleInput(clone(B, B_grad), args=(clone(LU, LU_grad), pivots))
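# Illustrative sketch (not part of the OpInfo database; _example_lu_solve_roundtrip is
# a hypothetical helper): the factorization built above is the same one users pass to
# torch.linalg.lu_solve in order to solve A X = B.
def _example_lu_solve_roundtrip():
    A = torch.randn(4, 4, dtype=torch.float64)
    B = torch.randn(4, 2, dtype=torch.float64)
    LU, pivots = torch.linalg.lu_factor(A)
    X = torch.linalg.lu_solve(LU, pivots, B)
    assert torch.allclose(A @ X, B, atol=1e-10)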
def sample_inputs_linalg_multi_dot(op_info, device, dtype, requires_grad, **kwargs):
# Each test case consists of the sizes in the chain of multiplications
# e.g. [2, 3, 4, 5] generates matrices (2, 3) @ (3, 4) @ (4, 5)
test_cases = [
[1, 2, 1],
[2, 0, 2],
[0, 2, 2],
[2, 2, 2, 2],
[2, 3, 4, 5],
[5, 4, 0, 2],
[2, 4, 3, 5, 3, 2],
]
for sizes in test_cases:
tensors = []
for size in zip(sizes[:-1], sizes[1:]):
t = make_tensor(
size, dtype=dtype, device=device, requires_grad=requires_grad
)
tensors.append(t)
yield SampleInput(tensors)
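# Illustrative sketch (not part of the OpInfo database; _example_multi_dot_chain is a
# hypothetical helper): a size chain such as [2, 3, 4, 5] above corresponds to the
# product (2, 3) @ (3, 4) @ (4, 5), which multi_dot evaluates in an optimal order.
def _example_multi_dot_chain():
    mats = [torch.randn(2, 3), torch.randn(3, 4), torch.randn(4, 5)]
    out = torch.linalg.multi_dot(mats)
    assert out.shape == (2, 5)
    assert torch.allclose(out, mats[0] @ mats[1] @ mats[2], atol=1e-5)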
def sample_inputs_linalg_matrix_norm(op_info, device, dtype, requires_grad, **kwargs):
low_precision_dtypes = (torch.float16, torch.bfloat16, torch.complex32)
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
sizes = ((2, 2), (2, 3, 2))
if dtype in low_precision_dtypes:
# svdvals not supported for low precision dtypes
ords = ("fro", inf, -inf, 1, -1)
else:
ords = ("fro", "nuc", inf, -inf, 1, -1, 2, -2)
dims = ((-2, -1), (-1, 0))
for size, ord, dim, keepdim in product(sizes, ords, dims, [True, False]):
yield SampleInput(make_arg(size), args=(ord, dim, keepdim))
def sample_inputs_linalg_norm(
op_info, device, dtype, requires_grad, *, variant=None, **kwargs
):
if variant is not None and variant not in ("subgradient_at_zero",):
raise ValueError(
f"Unsupported variant, expected variant to be 'subgradient_at_zero' but got: {variant}"
)
test_sizes = [
(S,),
(0,),
(S, S),
(0, 0),
(S, 0),
(0, S),
(S, S, S),
(0, S, S),
(S, 0, S),
(0, 0, 0),
]
vector_ords = (None, 0, 0.5, 1, 2, 3.5, inf, -0.5, -1, -2, -3.5, -inf)
if dtype in {torch.float16, torch.bfloat16, torch.complex32}:
# svdvals not supported for low precision dtypes
matrix_ords = ("fro", inf, -inf, 1, -1)
else:
matrix_ords = (None, "fro", "nuc", inf, -inf, 1, -1, 2, -2)
make_arg = partial(
make_tensor,
dtype=dtype,
device=device,
requires_grad=requires_grad,
low=None,
high=None,
)
for test_size in test_sizes:
is_vector_norm = len(test_size) == 1
is_matrix_norm = len(test_size) == 2
# IndexError: amax(): Expected reduction dim 0 to have non-zero size.
is_valid_for_p2 = is_vector_norm or (test_size[-1] != 0 and test_size[-2] != 0)
for keepdim in [False, True]:
if variant != "subgradient_at_zero" and is_valid_for_p2:
yield SampleInput(make_arg(test_size), keepdim=keepdim)
if not (is_vector_norm or is_matrix_norm):
continue
ords = vector_ords if is_vector_norm else matrix_ords
for ord in ords:
if is_vector_norm and test_size[-1] == 0:
if ord == np.inf or (ord is not None and ord < 0):
# RuntimeError: linalg.vector_norm cannot compute the
# {ord} norm on an empty tensor because the operation
# does not have an identity
continue
elif is_matrix_norm:
dims_to_check = {
None: (0,),
np.inf: (0,),
2: (0, 1),
1: (1,),
-1: (1,),
-2: (0, 1),
-np.inf: (0,),
}.get(ord, ())
if any(test_size[d] == 0 for d in dims_to_check):
# IndexError: amax(): Expected reduction dim {dim} to
# have non-zero size.
continue
if variant == "subgradient_at_zero":
yield SampleInput(
torch.zeros(
test_size,
dtype=dtype,
device=device,
requires_grad=requires_grad,
),
ord,
keepdim=keepdim,
)
else:
yield SampleInput(make_arg(test_size), ord, keepdim=keepdim)
if ord in ["nuc", "fro"]:
yield SampleInput(
make_arg(test_size), ord=ord, keepdim=keepdim, dim=(0, 1)
)
def sample_inputs_linalg_vecdot(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
batches = ((), (0,), (1,), (5,))
ns = (0, 1, 3, 5)
for b, n in product(batches, ns):
shape = b + (n,)
yield SampleInput(make_arg(shape), args=(make_arg(shape),))
for i in range(len(shape)):
yield SampleInput(
make_arg(shape), args=(make_arg(shape),), kwargs=dict(dim=i)
)
def sample_inputs_linalg_invertible(
op_info, device, dtype, requires_grad=False, **kwargs
):
"""
This function generates invertible inputs for linear algebra ops
The input is generated as the itertools.product of 'batches' and 'ns'.
In total this function generates 8 SampleInputs
'batches' cases include:
() - single input,
(0,) - zero batched dimension,
(2,) - batch of two matrices,
(1, 1) - 1x1 batch of matrices
'ns' gives 0x0 and 5x5 matrices.
Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
"""
make_fn = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(make_fn, dtype=dtype, device=device, requires_grad=requires_grad)
batches = [(), (0,), (2,), (1, 1)]
ns = [5, 0]
for batch, n in product(batches, ns):
yield SampleInput(make_arg(*batch, n, n))
def sample_inputs_matrix_rank(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function produces inputs for matrix rank that test
all possible combinations for atol and rtol
"""
def make_tol_arg(kwarg_type, inp):
if kwarg_type == "none":
return None
if kwarg_type == "float":
return 1.0
assert kwarg_type == "tensor"
return torch.ones(inp.shape[:-2], device=device)
for tol_type in ["float", "tensor"]:
for atol_type, rtol_type in product(["none", tol_type], repeat=2):
            if (
                atol_type == "none" and rtol_type == "none"
            ):  # default behavior, skipped here so it is not tested two extra times
continue
for sample in sample_inputs_linalg_invertible(
op_info, device, dtype, requires_grad
):
assert sample.kwargs == {}
sample.kwargs = {
"atol": make_tol_arg(atol_type, sample.input),
"rtol": make_tol_arg(rtol_type, sample.input),
}
yield sample
# default kwargs
yield from sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
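# Illustrative sketch (not part of the OpInfo database; _example_matrix_rank_tolerances
# is a hypothetical helper): atol/rtol, as exercised above, accept floats (and
# broadcastable tensors) and shift the singular-value cutoff.
def _example_matrix_rank_tolerances():
    A = torch.diag(torch.tensor([1.0, 1e-3, 0.0]))
    assert torch.linalg.matrix_rank(A) == 2
    # with an absolute tolerance of 1e-2 the 1e-3 singular value is dropped
    assert torch.linalg.matrix_rank(A, atol=1e-2) == 1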
def sample_inputs_linalg_pinv_singular(
op_info, device, dtype, requires_grad=False, **kwargs
):
"""
This function produces factors `a` and `b` to generate inputs of the form `a @ b.t()` to
test the backward method of `linalg_pinv`. That way we always preserve the rank of the
input no matter the perturbations applied to it by the gradcheck.
Note that `pinv` is Frechet-differentiable in a rank-preserving neighborhood.
"""
batches = [(), (0,), (2,), (1, 1)]
    # a size of at least 30 is required to trigger failures in the previous implicit
    # implementation of pinv's backward method, although it makes the test slow
size = [0, 3, 50]
for batch, m, n in product(batches, size, size):
for k in range(min(3, m, n)):
# Note that by making the columns of `a` and `b` orthonormal we make sure that
# the product matrix `a @ b.t()` has condition number 1 when restricted to its image
a = (
torch.rand(*batch, m, k, device=device, dtype=dtype)
.qr()
.Q.requires_grad_(requires_grad)
)
b = (
torch.rand(*batch, n, k, device=device, dtype=dtype)
.qr()
.Q.requires_grad_(requires_grad)
)
yield SampleInput(a, args=(b,))
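# Illustrative sketch (not part of the OpInfo database; _example_rank_preserving_product
# is a hypothetical helper): with orthonormal columns in both factors, a @ b.mH has
# rank exactly k, which is the rank-preservation property the docstring above relies on.
def _example_rank_preserving_product():
    m, n, k = 6, 5, 2
    a = torch.linalg.qr(torch.rand(m, k)).Q
    b = torch.linalg.qr(torch.rand(n, k)).Q
    assert torch.linalg.matrix_rank(a @ b.mH) == k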
def sample_inputs_linalg_cond(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(
make_tensor, dtype=dtype, device=device, requires_grad=requires_grad
)
# autograd is not supported for inputs with zero number of elements
shapes = (
(S, S),
(2, S, S),
(2, 1, S, S),
)
for shape in shapes:
yield SampleInput(make_arg(shape))
def sample_inputs_linalg_vander(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(
make_tensor, dtype=dtype, device=device, requires_grad=requires_grad
)
shapes = (
(),
(1,),
(S,),
(2, S),
)
for shape in shapes:
if len(shape) > 0 and shape[-1] > 1:
yield SampleInput(make_arg(shape))
n = shape[-1] if len(shape) > 0 else 1
for i in range(3):
# n-1, n, n+1
N = n + i - 1
if N < 2:
continue
yield SampleInput(make_arg(shape), kwargs=dict(N=N))
def np_vander_batched(x, N=None):
# Wrapper around np.vander that supports batches of 1 dimension (enough for the tests)
if x.ndim == 0:
x = x[np.newaxis]
if x.ndim == 1:
y = np.vander(x, N=N, increasing=True)
return y
else:
if N is None:
N = x.shape[-1]
y = np.vander(x.ravel(), N=N, increasing=True).reshape((*x.shape, N))
return y
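# Illustrative sketch (not part of the OpInfo database; _example_vander_reference_match
# is a hypothetical helper): the wrapper above is meant to agree with
# torch.linalg.vander (increasing powers) for the 1-D case used in the reference tests.
def _example_vander_reference_match():
    x = torch.arange(1.0, 4.0)  # tensor([1., 2., 3.])
    expected = np_vander_batched(x.numpy(), N=4)
    assert np.allclose(torch.linalg.vander(x, N=4).numpy(), expected)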
def sample_inputs_linalg_cholesky_inverse(
op_info, device, dtype, requires_grad=False, **kwargs
):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
# Cholesky factorization is for positive-definite matrices
single_well_conditioned_matrix = random_well_conditioned_matrix(
S, S, dtype=dtype, device=device
)
batch_well_conditioned_matrices = random_well_conditioned_matrix(
2, S, S, dtype=dtype, device=device
)
single_pd = single_well_conditioned_matrix @ single_well_conditioned_matrix.mH
batch_pd = batch_well_conditioned_matrices @ batch_well_conditioned_matrices.mH
inputs = (
torch.zeros(0, 0, dtype=dtype, device=device), # 0x0 matrix
torch.zeros(0, 2, 2, dtype=dtype, device=device), # zero batch of matrices
single_pd,
batch_pd,
)
test_cases = (torch.linalg.cholesky(a, upper=False) for a in inputs)
for l in test_cases:
# generated lower-triangular samples
l.requires_grad = requires_grad
yield SampleInput(l) # upper=False by default
yield SampleInput(
l.detach().clone().requires_grad_(requires_grad), kwargs=dict(upper=False)
)
# generate upper-triangular inputs
u = l.detach().clone().mT.contiguous().requires_grad_(requires_grad)
yield SampleInput(u, kwargs=dict(upper=True))
def sample_inputs_linalg_ldl_factor(
op_info, device, dtype, requires_grad=False, **kwargs
):
from torch.testing._internal.common_utils import (
random_hermitian_pd_matrix,
random_symmetric_pd_matrix,
)
device = torch.device(device)
# Symmetric inputs
yield SampleInput(
random_symmetric_pd_matrix(S, dtype=dtype, device=device),
kwargs=dict(hermitian=False),
) # single matrix
yield SampleInput(
random_symmetric_pd_matrix(S, 2, dtype=dtype, device=device),
kwargs=dict(hermitian=False),
) # batch of matrices
yield SampleInput(
torch.zeros(0, 0, dtype=dtype, device=device), kwargs=dict(hermitian=False)
) # 0x0 matrix
yield SampleInput(
torch.zeros(0, 2, 2, dtype=dtype, device=device), kwargs=dict(hermitian=False)
) # zero batch of matrices
# Hermitian inputs
# hermitian=True for complex inputs on CUDA is supported only with MAGMA 2.5.4+
magma_254_available = device.type == "cuda" and _get_magma_version() >= (2, 5, 4)
if dtype.is_complex and (device.type == "cpu" or magma_254_available):
yield SampleInput(
random_hermitian_pd_matrix(S, dtype=dtype, device=device),
kwargs=dict(hermitian=True),
) # single matrix
yield SampleInput(
random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device),
kwargs=dict(hermitian=True),
) # batch of matrices
def sample_inputs_linalg_ldl_solve(
op_info, device, dtype, requires_grad=False, **kwargs
):
# Generate LDL factors of symmetric (and Hermitian on CPU) matrices
from torch.testing._internal.common_utils import (
random_hermitian_pd_matrix,
random_symmetric_pd_matrix,
)
device = torch.device(device)
symmetric_inputs = (
random_symmetric_pd_matrix(S, dtype=dtype, device=device), # single matrix
random_symmetric_pd_matrix(
S, 2, dtype=dtype, device=device
), # batch of matrices
torch.zeros(0, 0, dtype=dtype, device=device), # 0x0 matrix
torch.zeros(0, 2, 2, dtype=dtype, device=device), # zero batch of matrices
)
hermitian_inputs = (
(
random_hermitian_pd_matrix(S, dtype=dtype, device=device),
random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device),
)
if device.type == "cpu" and dtype.is_complex
else ()
)
test_cases1 = (
torch.linalg.ldl_factor_ex(a, hermitian=False) for a in symmetric_inputs
)
test_cases2 = (
torch.linalg.ldl_factor_ex(a, hermitian=True) for a in hermitian_inputs
)
# Symmetric case
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
for test_case in test_cases1:
factors, pivots, _ = test_case
factors.requires_grad = requires_grad
for B_batch_shape in ((), factors.shape[:-2]):
B = make_arg((*B_batch_shape, factors.shape[-1], S))
yield SampleInput(factors, args=(pivots, B), kwargs=dict(hermitian=False))
clone_factors = factors.detach().clone().requires_grad_(requires_grad)
yield SampleInput(
clone_factors, args=(pivots, B), kwargs=dict(hermitian=False)
)
# Hermitian case
for test_case in test_cases2:
factors, pivots, _ = test_case
factors.requires_grad = requires_grad
for B_batch_shape in ((), factors.shape[:-2]):
B = make_arg((*B_batch_shape, factors.shape[-1], S))
yield SampleInput(factors, args=(pivots, B), kwargs=dict(hermitian=True))
clone_factors = factors.detach().clone().requires_grad_(requires_grad)
yield SampleInput(
clone_factors, args=(pivots, B), kwargs=dict(hermitian=True)
)
def sample_inputs_linalg_lstsq(op_info, device, dtype, requires_grad=False, **kwargs):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
device = torch.device(device)
drivers: tuple[str, ...]
if device.type == "cuda":
drivers = ("gels",)
else:
drivers = ("gels", "gelsy", "gelss", "gelsd")
# we generate matrices of shape (..., n + delta, n)
deltas: tuple[int, ...]
if device.type == "cpu" or has_cusolver():
deltas = (-1, 0, +1)
# only square systems if Cusolver is not available
        # because we solve an lstsq problem with a transposed matrix in the backward
else:
deltas = (0,)
for batch, driver, delta in product(((), (3,), (3, 3)), drivers, deltas):
shape = batch + (3 + delta, 3)
a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)
a.requires_grad_(requires_grad)
b = make_tensor(
shape,
dtype=dtype,
device=device,
low=None,
high=None,
requires_grad=requires_grad,
)
yield SampleInput(a, b, driver=driver)
def error_inputs_lstsq(op_info, device, **kwargs):
zero_d = torch.randn((), device=device)
yield ErrorInput(
SampleInput(zero_d, args=(zero_d,)),
error_type=RuntimeError,
error_regex="at least 2 dimensions",
)
def error_inputs_lstsq_grad_oriented(op_info, device, **kwargs):
zero_d = torch.randn((), device=device)
yield ErrorInput(
SampleInput(zero_d, args=(zero_d, None)),
error_type=RuntimeError,
error_regex="at least 2 dimensions",
)
def sample_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(
make_tensor, dtype=dtype, device=device, requires_grad=requires_grad
)
# Shapes for 2D Tensors
shapes_2d = ((S, S), (3, 5), (5, 3))
# Shapes for 3D Tensors
shapes_3d = ((S, S, S),)
kwargs_2d = ({}, dict(offset=2), dict(offset=2), dict(offset=1))
kwargs_3d = (
dict(offset=1, dim1=1, dim2=2),
dict(offset=2, dim1=0, dim2=1),
dict(offset=-2, dim1=0, dim2=1),
)
for shape, kwarg in chain(
product(shapes_2d, kwargs_2d), product(shapes_3d, kwargs_3d)
):
yield SampleInput(make_arg(shape), kwargs=kwarg)
def error_inputs_diagonal_diag_embed(op_info, device, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=torch.float32)
shapes1d = (0, 1, (0,), (1,))
shapes2d = ((M, L),)
shapes3d = ((M, S, L),)
kwargs1d = {}
kwargs2d = (
# dim1 == dim2 is not allowed
dict(dim1=1, dim2=1),
# out of bounds dims are not allowed
dict(dim1=10000),
dict(dim2=10000),
)
kwargs3d = kwargs2d
samples1d = product(shapes1d, kwargs1d)
samples2d = product(shapes2d, kwargs2d)
samples3d = product(shapes3d, kwargs3d)
for shape, kwargs in chain(samples1d, samples2d, samples3d):
arg = make_arg(shape)
sample = SampleInput(input=arg, kwargs=kwargs)
dim1 = kwargs.get("dim1")
dim2 = kwargs.get("dim2")
if "diagonal" in op_info.name:
num_dim = arg.dim()
elif op_info.name in ("diag_embed", "_refs.diag_embed"):
# these are valid inputs for diag_embed
if shape in ((0,), (1,)):
continue
num_dim = arg.dim() + 1
else:
raise RuntimeError("should be unreachable")
bound1 = -num_dim
bound2 = num_dim - 1
dim_range = range(bound1, bound2 + 1)
dim1_cond = dim1 and dim1 not in dim_range
dim2_cond = dim2 and dim2 not in dim_range
if dim1 == dim2:
err = f"diagonal dimensions cannot be identical {dim1}, {dim2}"
yield ErrorInput(sample, error_regex=err, error_type=RuntimeError)
elif dim1_cond or dim2_cond:
err_dim = dim1 if dim1_cond else dim2
err = (
r"Dimension out of range \(expected to be in range of "
rf"\[{bound1}, {bound2}\], but got {err_dim}\)"
)
yield ErrorInput(sample, error_regex=err, error_type=IndexError)
else:
raise RuntimeError("should be unreachable")
def sample_inputs_linalg_cholesky(
op_info, device, dtype, requires_grad=False, **kwargs
):
"""
This function generates always positive-definite input for torch.linalg.cholesky using
random_hermitian_pd_matrix.
    The input is generated as the itertools.product of 'batches', 'ns' and the 'upper' flag.
    In total this function generates 16 SampleInputs
'batches' cases include:
() - single input,
(0,) - zero batched dimension,
(2,) - batch of two matrices,
(1, 1) - 1x1 batch of matrices
'ns' gives 0x0 and 5x5 matrices.
Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
"""
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
batches = [(), (0,), (2,), (1, 1)]
ns = [5, 0]
for batch, n, upper in product(batches, ns, [True, False]):
a = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)
a.requires_grad = requires_grad
yield SampleInput(a, upper=upper)
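# Illustrative sketch (not part of the OpInfo database; _example_cholesky_roundtrip is
# a hypothetical helper): the samples above are Hermitian positive-definite, so the
# factor returned by torch.linalg.cholesky reconstructs the input.
def _example_cholesky_roundtrip():
    X = torch.randn(4, 4, dtype=torch.float64)
    A = X @ X.mT + 4 * torch.eye(4, dtype=torch.float64)  # symmetric positive-definite
    L = torch.linalg.cholesky(A)
    assert torch.allclose(L @ L.mT, A, atol=1e-10)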
def sample_inputs_linalg_eig(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates input for torch.linalg.eig
"""
def out_fn(output):
return output[0], abs(output[1])
samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
for sample in samples:
sample.output_process_fn_grad = out_fn
yield sample
def sample_inputs_linalg_eigh(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates input for torch.linalg.eigh/eigvalsh with UPLO="U" or "L" keyword argument.
"""
def out_fn(output):
if isinstance(output, tuple):
# eigh function
return output[0], abs(output[1])
else:
# eigvalsh function
return output
# Samples do not need to be Hermitian, as we're using gradcheck_wrapper_hermitian_input
samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
for sample in samples:
# Note: we cannot use np.random.choice here as TorchDynamo
# does not support tensors of strings.
sample.kwargs = {"UPLO": random.choice(["L", "U"])}
sample.output_process_fn_grad = out_fn
yield sample
def sample_inputs_linalg_pinv(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates input for torch.linalg.pinv with hermitian=False keyword argument.
"""
for o in sample_inputs_linalg_invertible(
op_info, device, dtype, requires_grad, **kwargs
):
real_dtype = o.input.real.dtype if dtype.is_complex else dtype
# requires_grad path for rtol tensor is not implemented
for rtol in (None, 1.0, torch.tensor(1.0, dtype=real_dtype, device=device)):
o = clone_sample(o)
o.kwargs = {"rtol": rtol}
yield o
def sample_inputs_linalg_pinv_hermitian(
op_info, device, dtype, requires_grad=False, **kwargs
):
"""
This function generates input for torch.linalg.pinv with hermitian=True keyword argument.
"""
for o in sample_inputs_linalg_invertible(
op_info, device, dtype, requires_grad, **kwargs
):
o.kwargs = {"hermitian": True}
yield o
def sample_inputs_linalg_solve(
op_info, device, dtype, requires_grad=False, vector_rhs_allowed=True, **kwargs
):
"""
This function generates always solvable input for torch.linalg.solve
We sample a fullrank square matrix (i.e. invertible) A
The first input to torch.linalg.solve is generated as the itertools.product of 'batches' and 'ns'.
The second input is generated as the product of 'batches', 'ns' and 'nrhs'.
    In total this function generates 24 SampleInputs (16 when vector_rhs_allowed is False)
    'batches' cases include:
        () - single input,
        (0,) - zero batched dimension,
        (2,) - batch of two matrices,
        (2, 2) - 2x2 batch of matrices.
'ns' gives 0x0 and 5x5 matrices.
and 'nrhs' controls the number of vectors to solve for:
() - using 1 as the number of vectors implicitly
(1,) - same as () but explicit
(3,) - solve for 3 vectors.
Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
'vector_rhs_allowed' controls whether to include nrhs = () to the list of SampleInputs.
torch.solve / triangular_solve / cholesky_solve (opposed to torch.linalg.solve) do not allow
1D tensors (vectors) as the right-hand-side.
Once torch.solve / triangular_solve / cholesky_solve and its testing are removed,
'vector_rhs_allowed' may be removed here as well.
"""
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_a = partial(
make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad
)
make_b = partial(
make_tensor, dtype=dtype, device=device, requires_grad=requires_grad
)
batches = [(), (0,), (2,), (2, 2)]
ns = [5, 0]
if vector_rhs_allowed:
nrhs = [(), (1,), (3,)]
else:
nrhs = [(1,), (3,)]
for n, batch, rhs in product(ns, batches, nrhs):
yield SampleInput(make_a(*batch, n, n), args=(make_b(batch + (n,) + rhs),))
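# Illustrative sketch (not part of the OpInfo database; _example_linalg_solve is a
# hypothetical helper): a single sample above corresponds to solving A x = b for a
# full-rank A; torch.linalg.solve also accepts the 1-D right-hand sides added via nrhs == ().
def _example_linalg_solve():
    A = torch.randn(5, 5, dtype=torch.float64)
    b = torch.randn(5, dtype=torch.float64)
    x = torch.linalg.solve(A, b)
    assert torch.allclose(A @ x, b, atol=1e-8)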
def sample_inputs_linalg_solve_triangular(
op_info, device, dtype, requires_grad=False, **kwargs
):
make_arg = partial(make_tensor, dtype=dtype, device=device)
bs = (1, 2, 0)
ns = (3, 0)
ks = (1, 3, 0)
for b, n, k, (left, upper, uni) in product(
bs, ns, ks, product((True, False), repeat=3)
):
if b == 1:
A = make_arg((n, n)) if left else make_arg((k, k))
B = make_arg((n, k))
else:
A = make_arg((b, n, n)) if left else make_arg((b, k, k))
B = make_arg((b, n, k))
if uni:
# Not really necessary, but writing it for consistency
A.diagonal(0, -2, -1).fill_(1.0)
else:
d = A.diagonal(0, -2, -1)
d[d.abs() < 1e-6] = 1.0
if upper:
A.triu_()
else:
A.tril_()
kwargs = {"upper": upper, "left": left, "unitriangular": uni}
if requires_grad:
for grad_A, grad_B in product((True, False), repeat=2):
# Either A or B needs to have a gradient
if not grad_A and not grad_B:
continue
yield SampleInput(
A.clone().requires_grad_(grad_A),
args=(B.clone().requires_grad_(grad_B),),
kwargs=kwargs,
)
else:
yield SampleInput(A, args=(B,), kwargs=kwargs)
def sample_inputs_legacy_solve(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates always solvable input for legacy solve functions
(the ones that are not in torch.linalg module).
The difference from sample_inputs_linalg_solve is that here the right-hand-side of A x = b equation
should have b.ndim >= 2, vectors are not allowed.
Also the arguments order is swapped.
"""
out = sample_inputs_linalg_solve(
op_info, device, dtype, requires_grad=requires_grad, vector_rhs_allowed=False
)
def out_fn(output):
return output[0]
# Reverses tensor order
for sample in out:
sample.input, sample.args = sample.args[0], (sample.input,)
if op_info.name == "solve":
sample.output_process_fn_grad = out_fn
yield sample
def sample_inputs_linalg_lu(op_info, device, dtype, requires_grad=False, **kwargs):
full_rank = op_info.name == "linalg.lu_factor"
make_fn = (
make_tensor
if not full_rank
else make_fullrank_matrices_with_distinct_singular_values
)
make_arg = partial(make_fn, dtype=dtype, device=device, requires_grad=requires_grad)
def out_fn(output):
if op_info.name == "linalg.lu":
return output[1], output[2]
else:
return output
batch_shapes = ((), (3,), (3, 3))
# pivot=False only supported in CUDA
pivots = (True, False) if torch.device(device).type == "cuda" else (True,)
deltas = (-2, -1, 0, +1, +2)
for batch_shape, pivot, delta in product(batch_shapes, pivots, deltas):
shape = batch_shape + (S + delta, S)
        # Note that make_fullrank_matrices_with_distinct_singular_values takes *shape rather than a tuple
A = make_arg(shape) if not full_rank else make_arg(*shape)
yield SampleInput(A, kwargs={"pivot": pivot}, output_process_fn_grad=out_fn)
def sample_inputs_linalg_svdvals(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(
make_tensor, dtype=dtype, device=device, requires_grad=requires_grad
)
batches = [(), (0,), (2,), (1, 1)]
ns = [5, 2, 0]
for batch, m, n in product(batches, ns, ns):
yield SampleInput(make_arg(batch + (m, n)))
def sample_inputs_linalg_qr_geqrf(
op_info, device, dtype, requires_grad=False, **kwargs
):
# QR is just well defined when the matrix is full rank
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(
make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad
)
batches = [(), (0,), (2,), (1, 1)]
ns = [5, 2, 0]
for batch, (m, n) in product(batches, product(ns, ns)):
shape = batch + (m, n)
yield SampleInput(make_arg(*shape))
def sample_inputs_tensorsolve(op_info, device, dtype, requires_grad, **kwargs):
a_shapes = [(2, 3, 6), (3, 4, 4, 3)]
# Zero-dim tensors are not supported in NumPy, so we skip them for now.
# NumPy is used in reference check tests.
# See https://github.com/numpy/numpy/pull/20482 for tracking NumPy bugfix.
# a_shapes += [(0, 0, 1, 2, 3, 0)]
dimss = [None, (0, 2)]
make_arg = partial(
make_tensor, dtype=dtype, device=device, requires_grad=requires_grad
)
for a_shape, dims in itertools.product(a_shapes, dimss):
a = make_arg(a_shape)
b = make_arg(a_shape[:2])
yield SampleInput(a, b, dims=dims)
def sample_inputs_tensorinv(op_info, device, dtype, requires_grad, **kwargs):
make_arg = make_fullrank_matrices_with_distinct_singular_values
def make_input():
return make_arg(12, 12, device=device, dtype=dtype, requires_grad=requires_grad)
# lhs / rhs shape can have any number of dimensions as long as their product equals 12
shapes = [
((2, 2, 3), (12, 1)),
((4, 3), (6, 1, 2)),
]
for shape_lhs, shape_rhs in shapes:
inp = make_input().reshape(*shape_lhs, *shape_rhs).detach()
inp.requires_grad_(requires_grad)
yield SampleInput(inp, ind=len(shape_lhs))
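# Illustrative sketch (not part of the OpInfo database; _example_tensorinv_shapes is a
# hypothetical helper): tensorinv treats the first `ind` dimensions as rows; their
# product must equal the product of the remaining dimensions, which is the constraint
# the shapes above satisfy.
def _example_tensorinv_shapes():
    A = torch.randn(2, 2, 3, 12, dtype=torch.float64)  # prod((2, 2, 3)) == 12
    A_inv = torch.linalg.tensorinv(A, ind=3)
    assert A_inv.shape == (12, 2, 2, 3)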
op_db: list[OpInfo] = [
OpInfo(
"linalg.cross",
ref=lambda x, y, dim=-1: np.cross(x, y, axis=dim),
op=torch.linalg.cross,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
aten_name="linalg_cross",
sample_inputs_func=sample_inputs_cross,
error_inputs_func=error_inputs_cross,
supports_out=True,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
skips=(
DecorateInfo(
unittest.skip("Unsupported on MPS for now"),
"TestCommon",
"test_numpy_ref_mps",
),
),
),
OpInfo(
"linalg.det",
aten_name="linalg_det",
op=torch.linalg.det,
aliases=("det",),
dtypes=floating_and_complex_types(),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet,
decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver],
check_batched_gradgrad=False,
),
OpInfo(
"linalg.diagonal",
aten_name="linalg_diagonal",
aten_backward_name="diagonal_backward",
dtypes=all_types_and_complex_and(
torch.bool, torch.bfloat16, torch.float16, torch.chalf
),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_diagonal_diag_embed,
error_inputs_func=error_inputs_diagonal_diag_embed,
),
OpInfo(
"linalg.cholesky",
aten_name="linalg_cholesky",
dtypes=floating_and_complex_types(),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_linalg_cholesky,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
),
OpInfo(
"linalg.cholesky_ex",
aten_name="linalg_cholesky_ex",
dtypes=floating_and_complex_types(),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_linalg_cholesky,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
),
OpInfo(
"linalg.vecdot",
aten_name="linalg_vecdot",
ref=lambda x, y, *, dim=-1: (x.conj() * y).sum(dim),
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_linalg_vecdot,
check_batched_forward_grad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
"TestSchemaCheckModeOpInfo",
"test_schema_correctness",
dtypes=(torch.complex64, torch.complex128),
),
DecorateInfo(
unittest.skip("Unsupported on MPS for now"),
"TestCommon",
"test_numpy_ref_mps",
),
DecorateInfo(
toleranceOverride({torch.half: tol(atol=1.2e-2, rtol=1.7e-2)}),
"TestInductorOpInfo",
"test_comprehensive",
device_type="cuda",
),
),
),
OpInfo(
"linalg.cond",
aten_name="linalg_cond",
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_cond,
check_batched_gradgrad=False,
check_batched_forward_grad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestFakeTensor",
"test_fake_crossref_backward_amp",
device_type="cuda",
dtypes=[torch.float32],
active_if=TEST_WITH_ROCM,
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestFakeTensor",
"test_fake_crossref_backward_no_amp",
device_type="cuda",
dtypes=[torch.float32],
active_if=TEST_WITH_ROCM,
),
),
),
OpInfo(
"linalg.eig",
aten_name="linalg_eig",
op=torch.linalg.eig,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_eig,
check_batched_forward_grad=False,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# AssertionError: Scalars are not equal!
DecorateInfo(
unittest.expectedFailure, "TestCommon", "test_out", device_type="cpu"
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_out",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_variant_consistency_eager",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="mps",
dtypes=[torch.float32],
),
),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off],
),
OpInfo(
"linalg.eigvals",
aten_name="linalg_eigvals",
op=torch.linalg.eigvals,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_invertible,
check_batched_forward_grad=False,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_out",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_variant_consistency_eager",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="mps",
dtypes=[torch.float32],
),
),
),
OpInfo(
"linalg.eigh",
aten_name="linalg_eigh",
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_eigh,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
check_batched_forward_grad=False,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off],
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_out",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_variant_consistency_eager",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="mps",
dtypes=[torch.float32],
),
),
),
OpInfo(
"linalg.eigvalsh",
aten_name="linalg_eigvalsh",
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_eigh,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
check_batched_forward_grad=False,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_out",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_variant_consistency_eager",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="mps",
dtypes=[torch.float32],
),
),
),
OpInfo(
"linalg.householder_product",
aten_name="linalg_householder_product",
op=torch.linalg.householder_product,
aliases=("orgqr",),
dtypes=floating_and_complex_types(),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
# TODO: backward uses in-place operations that vmap doesn't like
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_householder_product,
decorators=[
skipCUDAIfNoCusolver,
skipCPUIfNoLapack,
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)})
),
DecorateInfo(
unittest.skip("Skipped! Flaky"),
"TestFwdGradients",
"test_fn_fwgrad_bwgrad",
device_type="cpu",
dtypes=(torch.complex128,),
),
],
),
OpInfo(
"linalg.ldl_factor",
aten_name="linalg_ldl_factor",
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_linalg_ldl_factor,
decorators=[skipCUDAIfNoMagmaAndNoLinalgsolver, skipCPUIfNoLapack],
),
OpInfo(
"linalg.ldl_factor_ex",
aten_name="linalg_ldl_factor_ex",
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_linalg_ldl_factor,
decorators=[skipCUDAIfNoMagmaAndNoLinalgsolver, skipCPUIfNoLapack],
),
OpInfo(
"linalg.ldl_solve",
aten_name="linalg_ldl_solve",
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_linalg_ldl_solve,
decorators=[
skipCUDAIf(
_get_torch_cuda_version() < (11, 4), "not available before CUDA 11.3.1"
),
skipCUDAIfNoCusolver,
skipCUDAIfRocm,
skipCPUIfNoLapack,
],
),
OpInfo(
"linalg.lstsq",
aten_name="linalg_lstsq",
dtypes=floating_and_complex_types(),
supports_out=True,
sample_inputs_func=sample_inputs_linalg_lstsq,
error_inputs_func=error_inputs_lstsq,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# we skip gradient checks for this suite as they are tested in
# variant_test_name='grad_oriented'
DecorateInfo(unittest.skip("Skipped!"), "TestFwdGradients"),
DecorateInfo(unittest.skip("Skipped!"), "TestBwdGradients"),
# The values for attribute 'shape' do not match
DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_out"),
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_out",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_variant_consistency_eager",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="mps",
dtypes=[torch.float32],
),
),
),
OpInfo(
"linalg.lstsq",
aten_name="linalg_lstsq",
variant_test_name="grad_oriented",
        # gradcheck for forward AD fails with the full output tuple;
        # it works when taking [:2], which is (solution, residuals)
op=lambda a, b, driver: torch.linalg.lstsq(a, b, driver=driver)[:2],
supports_out=False,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_lstsq,
error_inputs_func=error_inputs_lstsq_grad_oriented,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# tests do not work with passing lambda for op
DecorateInfo(
unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
),
DecorateInfo(
unittest.expectedFailure,
"TestOperatorSignatures",
"test_get_torch_func_signature_exhaustive",
),
),
),
OpInfo(
"linalg.matrix_power",
aliases=("matrix_power",),
aten_name="linalg_matrix_power",
dtypes=floating_and_complex_types(),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_inplace_autograd=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_grad=False,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],
sample_inputs_func=sample_inputs_linalg_matrix_power,
),
OpInfo(
"linalg.multi_dot",
# Need this lambda because gradcheck does not work with TensorList inputs
aten_name="linalg_multi_dot",
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
supports_inplace_autograd=False,
# Batched grad checks fail for empty input tensors (see https://github.com/pytorch/pytorch/issues/53407)
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_linalg_multi_dot,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# https://github.com/pytorch/pytorch/issues/67470
DecorateInfo(
unittest.skip("67470!"), "TestCommon", "test_noncontiguous_samples"
),
# Fails on XLA.
# AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(
unittest.skip("Skipped!"),
"TestOpInfo",
device_type="xla",
dtypes=(torch.long,),
),
# https://github.com/pytorch/pytorch/issues/71774
DecorateInfo(
unittest.skip("Skipped!"),
"TestNNCOpInfo",
"test_nnc_correctness",
device_type="cpu",
dtypes=(torch.long,),
),
),
),
# NB: linalg.norm has two variants so that different skips can be used for different sample inputs
OpInfo(
"linalg.norm",
aten_name="linalg_norm",
op=torch.linalg.norm,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],
sample_inputs_func=sample_inputs_linalg_norm,
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(
unittest.expectedFailure, "TestBwdGradients", "test_fn_gradgrad"
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestFakeTensor",
"test_fake_crossref_backward_amp",
device_type="cuda",
dtypes=[torch.float32],
active_if=TEST_WITH_ROCM,
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestFakeTensor",
"test_fake_crossref_backward_no_amp",
device_type="cuda",
dtypes=[torch.float32],
active_if=TEST_WITH_ROCM,
),
),
),
OpInfo(
"linalg.norm",
op=torch.linalg.norm,
variant_test_name="subgradients_at_zero",
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],
sample_inputs_func=partial(
sample_inputs_linalg_norm, variant="subgradient_at_zero"
),
aten_name="linalg_norm",
supports_forward_ad=True,
# torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got:
# Could not allocate memory to change Tensor SizesAndStrides!
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
skips=(
# [NEW] Skips specifically for sample inputs at zero
# norm's vjp/jvp are not well-conditioned near zero
DecorateInfo(
unittest.expectedFailure, "TestBwdGradients", "test_fn_gradgrad"
),
DecorateInfo(
unittest.expectedFailure, "TestFwdGradients", "test_fn_fwgrad_bwgrad"
),
DecorateInfo(
unittest.expectedFailure, "TestFwdGradients", "test_forward_mode_AD"
),
DecorateInfo(unittest.expectedFailure, "TestBwdGradients", "test_fn_grad"),
),
),
OpInfo(
"linalg.matrix_norm",
aten_name="linalg_matrix_norm",
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
check_batched_forward_grad=False,
check_batched_gradgrad=False,
supports_fwgrad_bwgrad=True,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],
sample_inputs_func=sample_inputs_linalg_matrix_norm,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestFakeTensor",
"test_fake_crossref_backward_amp",
device_type="cuda",
dtypes=[torch.float32],
active_if=TEST_WITH_ROCM,
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestFakeTensor",
"test_fake_crossref_backward_no_amp",
device_type="cuda",
dtypes=[torch.float32],
active_if=TEST_WITH_ROCM,
),
),
),
OpInfo(
"linalg.qr",
aten_name="linalg_qr",
op=torch.linalg.qr,
dtypes=floating_and_complex_types(),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# In-place ops
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_qr_geqrf,
decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack],
),
OpInfo(
"linalg.slogdet",
aten_name="linalg_slogdet",
op=torch.linalg.slogdet,
dtypes=floating_and_complex_types(),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
),
OpInfo(
"linalg.vander",
aten_name="linalg_vander",
ref=np_vander_batched,
op=torch.linalg.vander,
dtypes=all_types_and_complex(),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
sample_inputs_func=sample_inputs_linalg_vander,
skips=(
DecorateInfo(
unittest.skip("Unsupported on MPS for now"),
"TestCommon",
"test_numpy_ref_mps",
),
),
),
ReductionOpInfo(
"linalg.vector_norm",
op=torch.linalg.vector_norm,
identity=0,
nan_policy="propagate",
supports_multiple_dims=True,
complex_to_real=True,
supports_forward_ad=True,
# torch.autograd.gradcheck.GradcheckError: While computing batched gradients
# got: Could not allocate memory to change Tensor SizesAndStrides!
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
generate_args_kwargs=sample_kwargs_vector_norm,
aten_name="linalg_vector_norm",
skips=(
# FIXME: sum reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"),
DecorateInfo(
unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim"
),
),
),
OpInfo(
"linalg.lu_factor",
aten_name="linalg_lu_factor",
op=torch.linalg.lu_factor,
dtypes=floating_and_complex_types(),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_linalg_lu,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
# linalg.lu_factor: LU without pivoting is not implemented on the CPU
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"),
),
),
OpInfo(
"linalg.lu_factor_ex",
aten_name="linalg_lu_factor_ex",
op=torch.linalg.lu_factor_ex,
dtypes=floating_and_complex_types(),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_linalg_lu,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
# linalg.lu_factor: LU without pivoting is not implemented on the CPU
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"),
),
),
OpInfo(
"linalg.lu",
aten_name="linalg_lu",
op=torch.linalg.lu,
dtypes=floating_and_complex_types(),
# https://github.com/pytorch/pytorch/issues/80411
# Runs very slowly on slow-gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_linalg_lu,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
# linalg.lu_factor: LU without pivoting is not implemented on the CPU
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"),
),
),
OpInfo(
"linalg.lu_solve",
op=torch.linalg.lu_solve,
aten_name="linalg_lu_solve",
dtypes=floating_and_complex_types(),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_lu_solve,
skips=(
DecorateInfo(
unittest.skip("Tests different backward paths"),
"TestCommon",
"test_floating_inputs_are_differentiable",
),
),
decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver],
),
OpInfo(
"linalg.inv",
aten_name="linalg_inv",
op=torch.linalg.inv,
aliases=("inverse",),
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_invertible,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_out",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_variant_consistency_eager",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="mps",
dtypes=[torch.float32],
),
),
),
OpInfo(
"linalg.inv_ex",
aten_name="linalg_inv_ex",
op=torch.linalg.inv_ex,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_invertible,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_out",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_variant_consistency_eager",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="mps",
dtypes=[torch.float32],
),
),
),
OpInfo(
"linalg.solve",
aten_name="linalg_solve",
op=torch.linalg.solve,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_solve,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[
skipCUDAIfNoMagmaAndNoCusolver,
skipCPUIfNoLapack,
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=6e-04)}),
"TestCommon",
"test_noncontiguous_samples",
device_type="cpu",
),
],
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_out",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_variant_consistency_eager",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="mps",
dtypes=[torch.float32],
),
),
),
OpInfo(
"linalg.solve_ex",
aten_name="linalg_solve_ex",
op=torch.linalg.solve_ex,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_solve,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[
skipCUDAIfNoMagmaAndNoCusolver,
skipCPUIfNoLapack,
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=6e-04)}),
"TestCommon",
"test_noncontiguous_samples",
device_type="cpu",
),
],
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_out",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_variant_consistency_eager",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="mps",
dtypes=[torch.float32],
),
),
),
OpInfo(
"linalg.solve_triangular",
aten_name="linalg_solve_triangular",
op=torch.linalg.solve_triangular,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_solve_triangular,
supports_fwgrad_bwgrad=True,
skips=(skipCPUIfNoLapack,),
# linalg.solve_triangular cannot be batched over because of a call to out.copy_(result);
supports_forward_ad=True,
),
OpInfo(
"linalg.matrix_rank",
aten_name="linalg_matrix_rank",
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_matrix_rank,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_out",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_variant_consistency_eager",
device_type="mps",
dtypes=[torch.float32],
),
# jit doesn't accept tensor inputs for matrix rank
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=[torch.complex64, torch.float32],
),
),
),
OpInfo(
"linalg.matrix_rank",
aten_name="linalg_matrix_rank",
variant_test_name="hermitian",
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_linalg_pinv_hermitian,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_out",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="mps",
dtypes=[torch.float32],
),
),
),
OpInfo(
"linalg.pinv",
aten_name="linalg_pinv",
op=torch.linalg.pinv,
dtypes=floating_and_complex_types(),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_linalg_pinv,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
# errors with "leaked XXXX bytes CUDA memory on device 0"
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="cuda",
),
),
),
OpInfo(
"linalg.pinv",
aten_name="linalg_pinv",
variant_test_name="singular",
# pinv is Frechet-differentiable in a rank-preserving neighborhood,
# so we feed inputs that are the products of two full-rank factors,
# to avoid any rank changes caused by the perturbations in the gradcheck
op=lambda a, b: torch.linalg.pinv(a @ b.mT),
dtypes=floating_and_complex_types(),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_linalg_pinv_singular,
# Only large tensors show issues with implicit backward used prior to
# explicit backward implementation.
decorators=[slowTest, skipCUDAIfNoCusolver, skipCPUIfNoLapack],
skips=(
DecorateInfo(
unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
),
# CUDA runs out of memory
DecorateInfo(
unittest.skip("Skipped!"),
"TestFwdGradients",
"test_fn_fwgrad_bwgrad",
device_type="cuda",
dtypes=[torch.cdouble],
),
# This test takes almost 2 hours to run!
DecorateInfo(
unittest.skip("Skipped!"),
"TestBwdGradients",
"test_fn_gradgrad",
device_type="cuda",
dtypes=[torch.cdouble],
),
),
),
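    # Editor's note (illustrative, not part of the original entry): the wrapper above
    # builds pinv inputs as products of two generically full-rank factors, e.g.
    #   a = torch.randn(5, 3, dtype=torch.double, requires_grad=True)
    #   b = torch.randn(5, 3, dtype=torch.double, requires_grad=True)
    #   out = torch.linalg.pinv(a @ b.mT)  # 5x5 input of rank <= 3
    # Small gradcheck perturbations of a and b leave the rank of a @ b.mT unchanged,
    # so pinv stays differentiable at the sampled points.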
OpInfo(
"linalg.pinv",
aten_name="linalg_pinv",
variant_test_name="hermitian",
dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_linalg_pinv_hermitian,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_out",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_variant_consistency_eager",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}),
"TestCommon",
"test_noncontiguous_samples",
device_type="cuda",
),
# This test is flaky under slow gradcheck, likely due to rounding issues
DecorateInfo(
skipIfSlowGradcheckEnv,
"TestFwdGradients",
"test_fn_fwgrad_bwgrad",
device_type="cuda",
),
),
),
OpInfo(
"linalg.svd",
op=torch.linalg.svd,
aten_name="linalg_svd",
decomp_aten_name="_linalg_svd",
dtypes=floating_and_complex_types(),
# Runs very slowly on slow-gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
check_batched_forward_grad=False,
# We're using at::allclose, which does not have a batching rule
check_batched_grad=False,
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_svd,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_out",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_variant_consistency_eager",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="mps",
dtypes=[torch.float32],
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestFakeTensor",
"test_fake_crossref_backward_amp",
device_type="cuda",
dtypes=[torch.float32],
active_if=TEST_WITH_ROCM,
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestFakeTensor",
"test_fake_crossref_backward_no_amp",
device_type="cuda",
dtypes=[torch.float32],
active_if=TEST_WITH_ROCM,
),
),
),
OpInfo(
"linalg.svdvals",
op=torch.linalg.svdvals,
aten_name="linalg_svdvals",
decomp_aten_name="_linalg_svd",
dtypes=floating_and_complex_types(),
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
# We're using at::allclose, which does not have a batching rule
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_svdvals,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestFakeTensor",
"test_fake_crossref_backward_amp",
device_type="cuda",
dtypes=[torch.float32],
active_if=TEST_WITH_ROCM,
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestFakeTensor",
"test_fake_crossref_backward_no_amp",
device_type="cuda",
dtypes=[torch.float32],
active_if=TEST_WITH_ROCM,
),
),
),
OpInfo(
"linalg.tensorinv",
ref=np.linalg.tensorinv,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_tensorinv,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver],
skips=(
DecorateInfo(
unittest.skip("Unsupported on MPS for now"),
"TestCommon",
"test_numpy_ref_mps",
),
),
),
OpInfo(
"linalg.tensorsolve",
ref=lambda a, b, dims=None: np.linalg.tensorsolve(a, b, axes=dims),
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_tensorsolve,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[
skipCUDAIfNoMagmaAndNoCusolver,
skipCPUIfNoLapack,
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03)}),
"TestCommon",
"test_noncontiguous_samples",
device_type="cuda",
),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=8e-04, rtol=7e-06)}),
"TestCommon",
"test_noncontiguous_samples",
device_type="cpu",
),
],
skips=(
DecorateInfo(
unittest.skip("Unsupported on MPS for now"),
"TestCommon",
"test_numpy_ref_mps",
),
),
),
]
python_ref_db: list[OpInfo] = [
#
# torch.linalg
#
PythonRefInfo(
"_refs.linalg.cross",
torch_opinfo_name="linalg.cross",
supports_out=True,
op_db=op_db,
skips=(
# TODO: is this really needed?
DecorateInfo(
unittest.expectedFailure, "TestCommon", "test_python_ref_errors"
),
),
),
PythonRefInfo(
"_refs.linalg.diagonal",
torch_opinfo_name="linalg.diagonal",
supports_out=False,
op_db=op_db,
),
PythonRefInfo(
"_refs.linalg.vecdot",
torch_opinfo_name="linalg.vecdot",
op_db=op_db,
),
ReductionPythonRefInfo(
"_refs.linalg.vector_norm",
torch_opinfo_name="linalg.vector_norm",
supports_out=True,
op_db=op_db,
skips=(
# FIXME: sum reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"),
DecorateInfo(
unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim"
),
),
),
PythonRefInfo(
"_refs.linalg.matrix_norm",
torch_opinfo_name="linalg.matrix_norm",
supports_out=True,
# Uses vector_norm inside and vector_norm is affected by
# https://github.com/pytorch/pytorch/issues/77216
validate_view_consistency=False,
op_db=op_db,
),
PythonRefInfo(
"_refs.linalg.norm",
torch_opinfo_name="linalg.norm",
supports_out=True,
# Uses vector_norm inside and vector_norm is affected by
# https://github.com/pytorch/pytorch/issues/77216
validate_view_consistency=False,
op_db=op_db,
),
PythonRefInfo(
"_refs.linalg.svd",
torch_opinfo_name="linalg.svd",
supports_out=True,
op_db=op_db,
),
PythonRefInfo(
"_refs.linalg.svdvals",
torch_opinfo_name="linalg.svdvals",
supports_out=True,
op_db=op_db,
),
]
```
|
============================================================================================================================================
SOURCE CODE FILE: nested.py
LINES: 1
SIZE: 59.88 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\opinfo\definitions\nested.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import math
from copy import copy
from dataclasses import dataclass
from functools import partial
from typing import Optional
import torch
from torch.fx.experimental.symbolic_shapes import is_nested_int
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.opinfo.core import (
BinaryUfuncInfo,
ReductionOpInfo,
SampleInput,
UnaryUfuncInfo,
)
from torch.utils._pytree import tree_flatten, tree_map
@dataclass
class ExtraOpData:
"""
Contains info on top of the typical OpInfo data that is useful for NJT test generation.
The process that converts the standard op_db -> an NJT-compatible op_db will attach this
data onto each associated OpInfo entry.
"""
# Indicates whether the associated op is a view op
is_view: bool = False
# Specifies the names of any dim-related args that the op takes in. This is useful
# for NJT tests because there is often asymmetry across the supported set of dims for
# an op; it may make sense to operate over the batch dim but not the ragged dim, for
# example. The length of this list should match the number of relevant overloads.
# Each list item of the outer list should specify dim argnames. Ellipses should be used
# to indicate multi-dim support for a given overload.
#
# For example, squeeze() has both a dim and multi-dim overload, where the argname for
# each is simply "dim". Its entry should be: [["dim"], ["dim..."]].
#
# If no overload of the op accepts dim-related args, this should be None.
    dim_args: Optional[list[list[str]]] = None
# Helper function to extract names of dim-related args.
# Returns: tuple of (single dim argname if available, dim list argname if available)
# If the op doesn't support dim-related args at all OR this op only has overloads
# with multiple dim args (e.g. transpose()), then this returns (None, None).
def get_dim_argnames(self) -> tuple[Optional[str], Optional[str]]:
if self.dim_args is None:
return (None, None)
# name for the dim arg that supports a single dim
single_dim_argname = None
# name for the dim arg that supports a list of dims
dimlist_argname = None
for overload in self.dim_args:
# only consider overloads with a single dim-related arg
if len(overload) != 1:
continue
if overload[0].endswith("..."):
dimlist_argname = overload[0].replace("...", "")
if single_dim_argname is None:
single_dim_argname = dimlist_argname
else:
single_dim_argname = overload[0]
return (single_dim_argname, dimlist_argname)
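# Editor's illustrative example (not part of the original file): how get_dim_argnames()
# resolves names for a few representative dim_args specs. squeeze() exposes a single-dim
# and a multi-dim overload, both named "dim"; transpose() only has an overload taking
# two dims, so nothing usable is returned; ops without dim args yield (None, None).
assert ExtraOpData(dim_args=[["dim"], ["dim..."]]).get_dim_argnames() == ("dim", "dim")
assert ExtraOpData(dim_args=[["dim..."]]).get_dim_argnames() == ("dim", "dim")
assert ExtraOpData(dim_args=[["dim0", "dim1"]]).get_dim_argnames() == (None, None)
assert ExtraOpData().get_dim_argnames() == (None, None)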
# Mapping of OpInfo full names -> extra data to tack onto the OpInfo entry for use
# in test generation.
extra_op_data = {
"_segment_reduce.lengths": ExtraOpData(dim_args=[["axis0"]]),
"_segment_reduce.offsets": ExtraOpData(dim_args=[["axis0"]]),
"all": ExtraOpData(dim_args=[["dim"], ["dim..."]]),
"argmax": ExtraOpData(dim_args=[["dim"]]),
"argmin": ExtraOpData(dim_args=[["dim"]]),
"amax": ExtraOpData(dim_args=[["dim..."]]),
"amin": ExtraOpData(dim_args=[["dim..."]]),
"any": ExtraOpData(dim_args=[["dim"], ["dim..."]]),
"argsort": ExtraOpData(dim_args=[["dim"]]),
"broadcast_to": ExtraOpData(is_view=True),
"cat": ExtraOpData(dim_args=[["dim"]]),
"chunk": ExtraOpData(is_view=True, dim_args=[["dim"]]),
"conj": ExtraOpData(is_view=True),
"contiguous": ExtraOpData(is_view=True),
"count_nonzero": ExtraOpData(dim_args=[["dim"], ["dim..."]]),
"cummax": ExtraOpData(dim_args=[["dim"]]),
"cummin": ExtraOpData(dim_args=[["dim"]]),
"cumprod": ExtraOpData(dim_args=[["dim"]]),
"cumsum": ExtraOpData(dim_args=[["dim"]]),
"cumulative_trapezoid": ExtraOpData(dim_args=[["dim"]]),
"diag_embed": ExtraOpData(dim_args=[["dim1", "dim2"]]),
"diagonal": ExtraOpData(is_view=True, dim_args=[["dim1", "dim2"]]),
"diagonal_copy": ExtraOpData(dim_args=[["dim1", "dim2"]]),
"diagonal_scatter": ExtraOpData(dim_args=[["dim1", "dim2"]]),
"diff": ExtraOpData(dim_args=[["dim"]]),
"expand": ExtraOpData(is_view=True),
"expand_as": ExtraOpData(is_view=True),
"fft.fft": ExtraOpData(dim_args=[["dim"]]),
"fft.hfft": ExtraOpData(dim_args=[["dim"]]),
"fft.ifft": ExtraOpData(dim_args=[["dim"]]),
"fft.ihfft": ExtraOpData(dim_args=[["dim"]]),
"fft.irfft": ExtraOpData(dim_args=[["dim"]]),
"fft.rfft": ExtraOpData(dim_args=[["dim"]]),
"flatten": ExtraOpData(is_view=True, dim_args=[["start_dim", "end_dim"]]),
"flip": ExtraOpData(dim_args=[["dims..."]]),
"gather": ExtraOpData(dim_args=[["dim"]]),
"imag": ExtraOpData(is_view=True),
"index_add": ExtraOpData(dim_args=[["dim"]]),
"index_copy": ExtraOpData(dim_args=[["dim"]]),
"index_fill": ExtraOpData(dim_args=[["dim"]]),
"index_reduce.amax": ExtraOpData(dim_args=[["dim"]]),
"index_reduce.amin": ExtraOpData(dim_args=[["dim"]]),
"index_reduce.mean": ExtraOpData(dim_args=[["dim"]]),
"index_reduce.prod": ExtraOpData(dim_args=[["dim"]]),
"index_select": ExtraOpData(dim_args=[["dim"]]),
"kthvalue": ExtraOpData(dim_args=[["dim"]]),
"linalg.cross": ExtraOpData(dim_args=[["dim"]]),
"linalg.diagonal": ExtraOpData(is_view=True, dim_args=[["dim1", "dim2"]]),
"linalg.tensorsolve": ExtraOpData(dim_args=[["dims..."]]),
"linalg.vecdot": ExtraOpData(dim_args=[["dim"]]),
"linalg.vector_norm": ExtraOpData(dim_args=[["dim..."]]),
"log_softmax": ExtraOpData(dim_args=[["dim"]]),
"logcumsumexp": ExtraOpData(dim_args=[["dim"]]),
"masked.amax": ExtraOpData(dim_args=[["dim"]]),
"masked.amin": ExtraOpData(dim_args=[["dim"]]),
"masked.argmax": ExtraOpData(dim_args=[["dim"]]),
"masked.argmin": ExtraOpData(dim_args=[["dim"]]),
"masked.logsumexp": ExtraOpData(dim_args=[["dim"]]),
"masked.mean": ExtraOpData(dim_args=[["dim"]]),
"masked.norm": ExtraOpData(dim_args=[["dim"]]),
"masked.prod": ExtraOpData(dim_args=[["dim"]]),
"masked.std": ExtraOpData(dim_args=[["dim"]]),
"masked.sum": ExtraOpData(dim_args=[["dim"]]),
"masked.var": ExtraOpData(dim_args=[["dim"]]),
"max.reduction_with_dim": ExtraOpData(dim_args=[["dim"]]),
"median": ExtraOpData(dim_args=[["dim"]]),
"mean": ExtraOpData(dim_args=[["dim..."]]),
"min.reduction_with_dim": ExtraOpData(dim_args=[["dim"]]),
"mode": ExtraOpData(dim_args=[["dim"]]),
"movedim": ExtraOpData(
dim_args=[["source", "destination"], ["source...", "destination..."]]
),
"nanmean": ExtraOpData(dim_args=[["dim..."]]),
"nanmedian": ExtraOpData(dim_args=[["dim"]]),
"nansum": ExtraOpData(dim_args=[["dim..."]]),
"narrow": ExtraOpData(is_view=True, dim_args=[["dim"]]),
"narrow_copy": ExtraOpData(dim_args=[["dim"]]),
"nn.functional.cosine_similarity": ExtraOpData(dim_args=[["dim"]]),
"nn.functional.glu": ExtraOpData(dim_args=[["dim"]]),
"permute": ExtraOpData(is_view=True, dim_args=[["dims..."]]),
"positive": ExtraOpData(is_view=True),
"prod": ExtraOpData(dim_args=[["dim"]]),
"ravel": ExtraOpData(is_view=True),
"real": ExtraOpData(is_view=True),
"renorm": ExtraOpData(dim_args=[["dim"]]),
"reshape": ExtraOpData(is_view=True),
"reshape_as": ExtraOpData(is_view=True),
"roll": ExtraOpData(dim_args=[["dims..."]]),
"rot90": ExtraOpData(dim_args=[["dims..."]]),
"scatter": ExtraOpData(dim_args=[["dim"]]),
"scatter_add": ExtraOpData(dim_args=[["dim"]]),
"scatter_reduce.amax": ExtraOpData(dim_args=[["dim"]]),
"scatter_reduce.amin": ExtraOpData(dim_args=[["dim"]]),
"scatter_reduce.mean": ExtraOpData(dim_args=[["dim"]]),
"scatter_reduce.prod": ExtraOpData(dim_args=[["dim"]]),
"scatter_reduce.sum": ExtraOpData(dim_args=[["dim"]]),
"select": ExtraOpData(is_view=True, dim_args=[["dim"]]),
"select_scatter": ExtraOpData(dim_args=[["dim"]]),
"slice": ExtraOpData(is_view=True, dim_args=[["dim"]]),
"slice_scatter": ExtraOpData(dim_args=[["dim"]]),
"softmax": ExtraOpData(dim_args=[["dim"]]),
"sort": ExtraOpData(dim_args=[["dim"]]),
"split": ExtraOpData(is_view=True, dim_args=[["dim"]]),
"split_with_sizes": ExtraOpData(is_view=True, dim_args=[["dim"]]),
"split_with_sizes_copy": ExtraOpData(dim_args=[["dim"]]),
"squeeze": ExtraOpData(is_view=True, dim_args=[["dim"], ["dim..."]]),
"squeeze_copy": ExtraOpData(dim_args=[["dim"], ["dim..."]]),
"stack": ExtraOpData(dim_args=[["dim"]]),
"std": ExtraOpData(dim_args=[["dim..."]]),
"std.unbiased": ExtraOpData(dim_args=[["dim..."]]),
"sum": ExtraOpData(dim_args=[["dim..."]]),
"t": ExtraOpData(is_view=True),
"tensor_split": ExtraOpData(is_view=True, dim_args=[["dim"]]),
"tensordot": ExtraOpData(dim_args=[["dims..."]]),
"tile": ExtraOpData(dim_args=[["dims..."]]),
"topk": ExtraOpData(dim_args=[["dim"]]),
"transpose": ExtraOpData(is_view=True, dim_args=[["dim0", "dim1"]]),
"transpose_copy": ExtraOpData(dim_args=[["dim0", "dim1"]]),
"trapezoid": ExtraOpData(dim_args=[["dim"]]),
"trapz": ExtraOpData(dim_args=[["dim"]]),
"unbind": ExtraOpData(is_view=True, dim_args=[["dim"]]),
"unflatten": ExtraOpData(is_view=True, dim_args=[["dim"]]),
"unfold": ExtraOpData(is_view=True, dim_args=[["dimension"]]),
"unfold_copy": ExtraOpData(dim_args=[["dimension"]]),
"unsafe_chunk": ExtraOpData(dim_args=[["dim"]]),
"unsafe_split": ExtraOpData(dim_args=[["dim"]]),
"unsqueeze": ExtraOpData(is_view=True, dim_args=[["dim"]]),
"unsqueeze_copy": ExtraOpData(dim_args=[["dim"]]),
"var": ExtraOpData(dim_args=[["dim..."]]),
"var.unbiased": ExtraOpData(dim_args=[["dim..."]]),
"view": ExtraOpData(is_view=True),
"view_as": ExtraOpData(is_view=True),
"view_as_complex": ExtraOpData(is_view=True),
"view_as_real": ExtraOpData(is_view=True),
}
# random integer used for sizes
def _rnd():
return torch.randint(3, 8, ()).item()
def _raggedness_matches(nt1, nt2):
return (
nt1.is_nested
and nt2.is_nested
and nt1._ragged_idx == nt2._ragged_idx
and nt1.shape[nt1._ragged_idx] == nt2.shape[nt2._ragged_idx]
)
# Helper function to avoid reusing the exact same tensor / NJT across SampleInputs,
# as this causes autograd problems.
def _clone(t):
requires_grad = t.requires_grad
return t.detach().clone().requires_grad_(requires_grad)
# Helper function to update a sample with new kwargs / name
def _update_sample(sample, new_kwargs):
all_kwargs = dict(sample.kwargs)
all_kwargs.update(new_kwargs)
full_name = ", ".join([sample.name, *(f"{k}={v}" for (k, v) in new_kwargs.items())])
return SampleInput(
_clone(sample.input),
args=sample.args,
kwargs=all_kwargs,
name=full_name,
)
# Generates a random NT.
# dims should be something like [5, None, 10], with None indicating that a
# random ragged structure should be used
def random_nt_from_dims(
dims, device=None, dtype=None, layout=torch.strided, requires_grad=False
):
    sizes = [[d if d is not None else _rnd() for d in dims[1:]] for _ in range(dims[0])]
return torch.nested.nested_tensor(
[torch.randn(*size) for size in sizes],
device=device,
dtype=dtype,
layout=layout,
requires_grad=requires_grad,
)
# Helper function to get a reasonable string representation of an NJT for use in
# SampleInput names.
def _describe_njt(njt) -> str:
contig_type = "_contig" if njt.is_contiguous() else "_noncontig"
if njt._lengths is not None and njt._offsets is not None:
contig_type += "_holes"
elif njt._ragged_idx != 1:
contig_type += "_transposed"
cached_data = "_without_seqlen_cache"
if njt._max_seqlen_tensor is not None:
cached_data = "_with_seqlen_cache"
return f"{njt.dim()}D{contig_type}{cached_data}"
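# Editor's note (illustrative): examples of the names this helper produces include
# "3D_contig_with_seqlen_cache" for a contiguous 3D NJT with cached min/max seqlens,
# "3D_noncontig_transposed_without_seqlen_cache" for a transposed one without the cache,
# and "3D_noncontig_holes_without_seqlen_cache" for an NJT with lengths-induced holes.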
# Helper function to get a reasonable string representation of a given dim wrt an NJT.
def _describe_dim(njt, dim):
if dim == 0:
return "batch_dim"
elif dim == njt._ragged_idx:
return "ragged_dim"
return "normal_dim"
# Helper function for generating a comprehensive set of NJT sample inputs.
def _sample_njts(device, dtype, requires_grad=False, dims=None):
if dims is None:
dims = [2, 3, 4]
if not isinstance(dims, (list, tuple)):
dims = [dims]
# contiguous NJTs
for dim in dims:
# with min / max seqlen cached
shape = (_rnd(), None, *[_rnd() for _ in range(dim - 2)])
nt = random_nt_from_dims(
shape,
device=device,
dtype=dtype,
requires_grad=requires_grad,
layout=torch.jagged,
)
yield nt
# without min / max seqlen cached
values = _clone(nt.values())
offsets = _clone(nt.offsets())
yield torch.nested.nested_tensor_from_jagged(values, offsets).requires_grad_(
requires_grad
)
# non-contiguous transposed NJT (not possible for 2D)
if dim > 2:
yield nt.transpose(-1, nt._ragged_idx)
# non-contiguous with holes NJT
values = _clone(nt.values())
offsets = _clone(nt.offsets())
# subtract 1 to cause holes
lengths = _clone(offsets.diff() - 1)
yield torch.nested.nested_tensor_from_jagged(
values=values,
offsets=offsets,
lengths=lengths,
).requires_grad_(requires_grad)
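# Editor's note (illustrative): for each requested dim (e.g. dims=[3]) this generator
# yields a contiguous NJT with cached min/max seqlen, the same data re-wrapped without
# the cache, a non-contiguous transposed view (only when dim > 2), and a non-contiguous
# NJT with holes (lengths shorter than the offset deltas).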
# Computes an unbind-based reference for a given OpInfo on a given SampleInput.
# This reference unbinds the input NJT and invokes the op on each of the components,
# optionally wrapping the result in an NJT.
def unbind_reference(op, sample, wrap_output_as_njt=True):
# first NJT in the arglist determines expected ragged structure
nt_inp = (
sample.input
if sample.input.is_nested
# TODO: look in kwargs too?
else next(a for a in sample.args if a.is_nested)
)
out_ref_components = []
for i in range(nt_inp.shape[0]):
def _slice_input(t, i=i, inp=nt_inp):
# any NJT with the same ragged structure as the input should
# be sliced to pass to the reference
if isinstance(t, torch.Tensor) and _raggedness_matches(t, inp):
return t[i]
# allow the SampleInput to tell us how to slice it for ref calculation
elif isinstance(t, torch.Tensor) and hasattr(t, "_batch_dim"):
bdim = t._batch_dim # type: ignore[attr]
if t.shape[bdim] == 1:
return t[0]
else:
return t.select(bdim, i)
else:
return t
inp = _slice_input(sample.input)
args = tree_map(_slice_input, sample.args)
kwargs = tree_map(_slice_input, sample.kwargs)
# Handle indices in index_put
if "index_put" in op.full_name and "indices" in kwargs:
if len(kwargs["indices"]) > 1:
# If after unrolling we still have indices left, use them
kwargs["indices"] = [t[i] for t in kwargs["indices"][1:]]
else:
# If no indices are left, create them so they match the NJT implementation
sequence_put = kwargs["indices"][0].tolist()
if i in sequence_put:
kwargs["indices"] = [
torch.tensor(
list(range(inp.shape[0])),
dtype=torch.int32,
device=kwargs["indices"][0].device,
)
]
else:
kwargs["indices"] = [
torch.tensor(
[], dtype=torch.int32, device=kwargs["indices"][0].device
)
]
from torch.nested._internal.ops import _outer_to_inner_dim
# Need to adjust dims to apply on NJT component
if op._extra_op_data.dim_args is not None:
# get all possible dim-related argnames that could be encountered for this op
argnames = tree_map(
lambda a: a.replace("...", ""),
tree_flatten(op._extra_op_data.dim_args)[0],
)
# for all dim-related args present, convert from outer -> inner dim space
for argname in {a for a in argnames if a in kwargs}:
# allow the SampleInput to tell us how to canonicalize the dim kwargs
ndim = nt_inp._ndim if hasattr(nt_inp, "_ndim") else nt_inp.dim()
kwargs[argname] = _outer_to_inner_dim(
ndim, kwargs[argname], nt_inp._ragged_idx, canonicalize=True
)
out_ref_component = op.op(inp, *args, **kwargs)
out_ref_components.append(out_ref_component)
if wrap_output_as_njt:
# handle list / tuple of outputs
if len(out_ref_components) > 0 and isinstance(
out_ref_components[0], (list, tuple)
):
num_returns = len(out_ref_components[0])
# ensure we get the same number of returns for each invocation
assert all(len(o) == num_returns for o in out_ref_components)
# construct NJTs from same index returns from each invocation
njt_returns = [
torch.nested.as_nested_tensor(
[o[r] for o in out_ref_components], layout=torch.jagged
)
for r in range(num_returns)
]
return type(out_ref_components[0])(njt_returns)
return torch.nested.as_nested_tensor(out_ref_components, layout=torch.jagged)
return out_ref_components
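# Editor's illustrative sketch (not part of the original file): for a simple unary op,
# the unbind-based reference above boils down to
#   components = [op.op(c) for c in nt_inp.unbind()]
#   ref = torch.nested.as_nested_tensor(components, layout=torch.jagged)
# with the extra machinery handling dense args that broadcast over the batch dim
# (via the _batch_dim hint), index_put indices, and outer -> inner dim remapping
# for ops that take dim arguments.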
# Computes the reference value for a non-reduction unary op with dim-wise application.
def unary_dimwise_reference(op, sample, batchwise_reference=None):
# extract info about the dim args this op supports
assert op._extra_op_data.dim_args is not None
single_dim_argname, dimlist_argname = op._extra_op_data.get_dim_argnames()
# only support a single non-list dim arg for now
assert dimlist_argname is None
assert single_dim_argname is not None
if sample.kwargs[single_dim_argname] == 0:
# unbind reference won't work for batch-wise operation; handle this case here
assert batchwise_reference is not None
return batchwise_reference(op, sample)
return unbind_reference(op, sample)
# Computes the reference value for a reduction op.
def reduction_reference(op, sample):
assert sample.input.is_nested
# extract info about the dim args this op supports
assert op._extra_op_data.dim_args is not None
single_dim_argname, dimlist_argname = op._extra_op_data.get_dim_argnames()
assert single_dim_argname is not None
dim = sample.kwargs.get(
dimlist_argname, sample.kwargs.get(single_dim_argname, None)
)
keepdim = sample.kwargs.get("keepdim", False)
assert dim != 0, "reductions over just the batch dim are not supported"
if isinstance(dim, (tuple, list)):
reduce_on_ragged = sample.input._ragged_idx in dim
reduce_on_batch = 0 in dim
else:
reduce_on_ragged = sample.input._ragged_idx == dim
reduce_on_batch = dim == 0
if dim is None:
# calculate reference value by running reduction on values buffer
return op.op(sample.input.values(), *sample.args, **sample.kwargs)
if reduce_on_ragged and reduce_on_batch:
# run reference directly on buffer with dims converted to inner space
from torch.nested._internal.ops import _outer_to_inner_dim
ref_kwargs = dict(sample.kwargs)
assert dimlist_argname is not None
ref_kwargs[dimlist_argname] = _outer_to_inner_dim(
sample.input.dim(), dim, sample.input._ragged_idx, canonicalize=True
)
out = op.op(sample.input.values(), *sample.args, **ref_kwargs)
if keepdim:
if isinstance(out, (tuple, list)):
# some ops return multiple things; unsqueeze all of them
out = type(out)(o.unsqueeze(0) for o in out)
else:
out = out.unsqueeze(0)
return out
if reduce_on_ragged and not reduce_on_batch:
# calculate reference value by running an unbind reference and stacking
out_ref_components = unbind_reference(op, sample, wrap_output_as_njt=False)
if len(out_ref_components) > 0 and isinstance(
out_ref_components[0], (tuple, list)
):
# some ops return multiple things; stack all of them
num_returns = len(out_ref_components[0])
# ensure we get the same number of returns for each invocation
assert all(len(o) == num_returns for o in out_ref_components)
# stack same index returns from each invocation
stacked_returns = [
torch.stack([o[r] for o in out_ref_components], dim=0)
for r in range(num_returns)
]
return type(out_ref_components[0])(stacked_returns)
return torch.stack(out_ref_components, dim=0)
# unbind reference works for other reductions
return unbind_reference(op, sample)
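# Editor's illustrative summary (not part of the original file): for a 3D NJT of shape
# (B, j1, D) with _ragged_idx == 1, the cases above resolve as follows:
#   dim=None      -> reduce directly over the values() buffer
#   dim=[0, 1]    -> batch+ragged: run on values() with dims remapped to inner space,
#                    unsqueezing dim 0 afterwards when keepdim=True
#   dim=1         -> ragged only: unbind, reduce each component, stack along dim 0
#   dim=2         -> normal dim: plain unbind-based reference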
def sample_inputs_elementwise_njt_unary(
op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs
):
if not op_kwargs:
op_kwargs = {}
for njt in _sample_njts(
device=device, dtype=dtype, requires_grad=requires_grad, dims=[2, 3, 4]
):
yield SampleInput(njt, kwargs=dict(op_kwargs), name=_describe_njt(njt))
def sample_inputs_elementwise_njt_binary(
op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs
):
if not op_kwargs:
op_kwargs = {}
for njt1 in _sample_njts(
device=device, dtype=dtype, requires_grad=requires_grad, dims=[2, 3, 4]
):
njt_desc = _describe_njt(njt1)
njt2 = torch.randn_like(njt1)
yield SampleInput(
_clone(njt1),
args=(njt2,),
kwargs=dict(op_kwargs),
name=f"{njt_desc}: (NT, NT)",
)
# broadcasting case: (B, j0, ...) with (B, 1, ...)
dense_shape = list(njt1.shape)
dense_shape[njt1._ragged_idx] = 1
t = torch.randn(
dense_shape,
device=device,
dtype=dtype,
requires_grad=requires_grad,
)
t2 = _clone(t)
# used for slicing in unbind_reference()
t._batch_dim = 0
t2._batch_dim = 0
# (NT, T)
yield SampleInput(
_clone(njt1),
args=(t,),
kwargs=dict(op_kwargs),
name=f"{njt_desc}: (NT, T) broadcasting 1 over ragged",
)
# (T, NT)
yield SampleInput(
t2,
args=(_clone(njt1),),
kwargs=dict(op_kwargs),
name=f"{njt_desc}: (T, NT) broadcasting 1 over ragged",
)
# broadcasting case: (B, j0, ...) with (1, 1...)
t = torch.randn(
[1 for _ in range(njt1.dim())],
device=device,
dtype=dtype,
requires_grad=requires_grad,
)
t2 = _clone(t)
# used for slicing in unbind_reference()
t._batch_dim = 0
t2._batch_dim = 0
# (NT, T)
yield SampleInput(
_clone(njt1),
args=(t,),
kwargs=dict(op_kwargs),
name=f"{njt_desc}: (NT, T) broadcasting all 1s",
)
# (T, NT)
yield SampleInput(
t2,
args=(_clone(njt1),),
kwargs=dict(op_kwargs),
name=f"{njt_desc}: (T, NT) broadcasting all 1s",
)
# broadcasting case: (B, j0, ...) with (...)
if njt1.dim() > njt1._ragged_idx + 1:
t = torch.randn(
njt1.shape[njt1._ragged_idx + 1 :],
device=device,
dtype=dtype,
requires_grad=requires_grad,
)
# (NT, T)
yield SampleInput(
_clone(njt1),
args=(_clone(t),),
kwargs=dict(op_kwargs),
name=f"{njt_desc}: (NT, T) broadcasting normal dims",
)
# (T, NT)
yield SampleInput(
_clone(t),
args=(_clone(njt1),),
kwargs=dict(op_kwargs),
name=f"{njt_desc}: (T, NT) broadcasting normal dims",
)
# broadcasting case: (B, j0, ...) with scalar
t = torch.randn((), device=device, dtype=dtype, requires_grad=requires_grad)
# (NT, T)
yield SampleInput(
_clone(njt1),
args=(_clone(t),),
kwargs=dict(op_kwargs),
name=f"{njt_desc}: (NT, T) broadcasting with scalar",
)
# (T, NT)
yield SampleInput(
_clone(t),
args=(_clone(njt1),),
kwargs=dict(op_kwargs),
name=f"{njt_desc}: (T, NT) broadcasting with scalar",
)
# mixed broadcasting case: (B, j0, 1) with (B, 1, D)
B = 4
D = 16
njt = random_nt_from_dims(
(B, None, 1),
device=device,
dtype=dtype,
requires_grad=requires_grad,
layout=torch.jagged,
)
njt_desc = _describe_njt(njt)
t = torch.randn(B, 1, D, device=device, dtype=dtype, requires_grad=requires_grad)
t2 = _clone(t)
# used for slicing in unbind_reference()
t._batch_dim = 0
t2._batch_dim = 0
# (NT, T)
yield SampleInput(
_clone(njt),
args=(t,),
kwargs=dict(op_kwargs),
name=f"{njt_desc}: (NT, T) mixed broadcasting",
)
# (T, NT)
yield SampleInput(
t2,
args=(_clone(njt),),
kwargs=dict(op_kwargs),
name=f"{njt_desc}: (T, NT) mixed broadcasting",
)
def sample_inputs_njt_reduction(
op_info,
device,
dtype,
requires_grad,
supports_keepdim=True,
op_kwargs=None,
**kwargs,
):
if not op_kwargs:
op_kwargs = {}
# extract info about the dim args this op supports
assert op_info._extra_op_data.dim_args is not None
(
single_dim_argname,
dimlist_argname,
) = op_info._extra_op_data.get_dim_argnames()
assert single_dim_argname is not None
supports_dimlist = dimlist_argname is not None
for njt in _sample_njts(
device=device, dtype=dtype, requires_grad=requires_grad, dims=[2, 3, 4]
):
njt_desc = _describe_njt(njt)
keepdim_values = [False, True] if supports_keepdim else [None]
for keepdim in keepdim_values:
keepdim_suffix = f" with keepdim={keepdim}" if supports_keepdim else ""
# single dim-wise reduction; includes reduction over the ragged dim
# NB: reduction over the batch dim is not supported!
# TODO: Cover this in the set of error inputs
for dim in range(1, njt.dim()):
dim_desc = "normal" if dim != njt._ragged_idx else "ragged"
yield SampleInput(
_clone(njt),
kwargs={
**op_kwargs,
single_dim_argname: dim,
**({"keepdim": keepdim} if supports_keepdim else {}),
},
name=f"{njt_desc}: {dim_desc} dim reduction{keepdim_suffix}",
)
if supports_dimlist:
# reduce on both batch and ragged dims
yield SampleInput(
_clone(njt),
kwargs={
**op_kwargs,
dimlist_argname: [0, njt._ragged_idx],
**({"keepdim": keepdim} if supports_keepdim else {}),
},
name=f"{njt_desc}: batch+ragged reduction{keepdim_suffix}",
)
# reduce on batch, ragged, and other dims
for other_dim in range(njt._ragged_idx + 1, njt.dim()):
yield SampleInput(
_clone(njt),
kwargs={
**op_kwargs,
dimlist_argname: [0, njt._ragged_idx, other_dim],
**({"keepdim": keepdim} if supports_keepdim else {}),
},
name=(
f"{njt_desc}: batch+ragged+dim={other_dim} "
f"reduction{keepdim_suffix}"
),
)
# reduce on two non-ragged, non-batch dims
if njt.dim() > 3 and njt._ragged_idx == 1:
yield SampleInput(
_clone(njt),
kwargs={
**op_kwargs,
dimlist_argname: [njt.dim() - 2, njt.dim() - 1],
**({"keepdim": keepdim} if supports_keepdim else {}),
},
name=f"{njt_desc}: two normal dim reduction{keepdim_suffix}",
)
# full reduction by specifying all dims
yield SampleInput(
_clone(njt),
kwargs={
**op_kwargs,
dimlist_argname: list(range(njt.dim())),
**({"keepdim": keepdim} if supports_keepdim else {}),
},
name=f"{njt_desc}: all dim reduction{keepdim_suffix}",
)
# TODO: Reducing on ragged dim and non-batch dim is not supported;
# cover this in the set of error inputs.
# full reduction
yield SampleInput(
_clone(njt),
kwargs=dict(op_kwargs),
name=f"{njt_desc}: full reduction with keepdim={keepdim}",
)
def unsupported_sample_inputs_func(op_name):
def _f(op_info, device, dtype, requires_grad, op_name=op_name, **kwargs):
raise RuntimeError(
f"OpInfo for {op_name} does not support NJT. Support can be added by modifying "
"torch/testing/_internal/opinfo/definitions/nested.py."
)
return _f
def unsupported_reference(op_name):
def _f(op, sample):
raise RuntimeError(
f"OpInfo for {op_name} does not define a ref() function. Support can be added by "
"modifying torch/testing/_internal/opinfo/definitions/nested.py."
)
return _f
# === BEGIN OP-SPECIFIC SAMPLE INPUTS FUNCS / REFERENCES ===
def sample_inputs_unary_dimwise(
op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs
):
if op_kwargs is None:
op_kwargs = {}
# only support a single non-list dim arg for now
assert op_info._extra_op_data is not None
single_dim_argname, dimlist_argname = op_info._extra_op_data.get_dim_argnames()
assert single_dim_argname is not None
assert dimlist_argname is None
for njt in _sample_njts(
device=device, dtype=dtype, requires_grad=requires_grad, dims=[2, 3, 4]
):
for dim in range(njt.dim()):
kwargs = {single_dim_argname: dim}
kwargs.update(op_kwargs)
yield SampleInput(
_clone(njt),
kwargs=kwargs,
name=f"{_describe_njt(njt)}: {_describe_dim(njt, dim)}",
)
def batchwise_reference_chunk(op, sample):
# reference for chunk() over dim=0
B = sample.input.size(0)
num_chunks = sample.kwargs["chunks"]
chunk_size = math.ceil(B / num_chunks)
num_full_chunks = B // chunk_size
chunk_sizes = [chunk_size for _ in range(num_full_chunks)]
if B % chunk_size != 0:
# final chunk contains the leftovers
chunk_sizes.append(B % chunk_size)
# split unbound components into chunks according to calculated sizes
components = list(sample.input.unbind())
start = 0
chunks = []
for chunk_size in chunk_sizes:
chunks.append(components[start : start + chunk_size])
start += chunk_size
# rejoin into NJT outputs
return [torch.nested.as_nested_tensor(lst, layout=torch.jagged) for lst in chunks]
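# Editor's worked example (not part of the original file): with B=5 components and
# chunks=3, chunk_size = ceil(5 / 3) = 2, num_full_chunks = 5 // 2 = 2, and the
# remainder 5 % 2 = 1 becomes the final chunk, giving chunk_sizes == [2, 2, 1] --
# matching how torch.chunk splits a dense dim of size 5 into 3 chunks.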
def batchwise_reference_narrow(op, sample):
# TODO: write this!
raise NotImplementedError
def batchwise_reference_select(op, sample):
# reference for select() over dim=0
return sample.input.unbind()[sample.kwargs["index"]]
def batchwise_reference_split(op, sample):
# TODO: write this!
raise NotImplementedError
def batchwise_reference_split_with_sizes(op, sample):
# TODO: write this!
raise NotImplementedError
def batchwise_reference_unflatten(op, sample):
# TODO: write this!
raise NotImplementedError
def batchwise_reference_unsqueeze(op, sample):
raise ValueError("unsqueeze() is not intended to operate on the batch dim")
def sample_inputs_clone(op_info, device, dtype, requires_grad, **kwargs):
# non-contiguous NJTs
for njt in _sample_njts(
device=device, dtype=dtype, requires_grad=requires_grad, dims=[2, 3, 4]
):
yield SampleInput(njt, name=_describe_njt(njt))
for memory_format in (torch.contiguous_format, torch.preserve_format):
# construct a "non-contiguous with holes" NJT
values = torch.randn(
10, 5, device=device, dtype=dtype, requires_grad=requires_grad
)
offsets = torch.tensor([0, 2, 4, 10], device=device, dtype=torch.int64)
lengths = torch.tensor([2, 1, 3], device=device, dtype=torch.int64)
njt = torch.nested.nested_tensor_from_jagged(
values, offsets=offsets, lengths=lengths
)
njt_desc = _describe_njt(njt)
yield SampleInput(
njt,
kwargs={"memory_format": memory_format},
name=f"{njt_desc}: {memory_format})",
)
def sample_inputs_fill(op_info, device, dtype, requires_grad, **kwargs):
# scalar case
unary_func = partial(sample_inputs_elementwise_njt_unary, op_kwargs={"value": 42.0})
yield from unary_func(op_info, device, dtype, requires_grad)
# TODO: add Tensor case
def sample_inputs_mvl_gamma(p):
return partial(sample_inputs_elementwise_njt_unary, op_kwargs={"p": p})
def sample_inputs_polygamma_n(n):
return partial(sample_inputs_elementwise_njt_unary, op_kwargs={"n": n})
def sample_inputs_special_polygamma_n(n):
return partial(sample_inputs_elementwise_njt_unary, op_kwargs={"n": n})
def sample_inputs_to(op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs):
for njt in _sample_njts(
device=device,
dtype=dtype,
requires_grad=requires_grad,
dims=[2, 3, 4],
):
other_dtypes = (
d for d in (torch.float32, torch.half, torch.double) if d is not dtype
)
for other_dtype in other_dtypes:
sample_name = f"{njt.dim()}D: {dtype} -> {other_dtype}"
            yield SampleInput(_clone(njt), kwargs={"dtype": other_dtype}, name=sample_name)
# only include device transfer for CUDA inputs
if "cuda" in device:
other_device = "cpu"
sample_name = f"{_describe_njt(njt)}: {device} -> {other_device}"
yield SampleInput(
_clone(njt), kwargs={"device": other_device}, name=sample_name
)
def sample_inputs_bmm(op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs):
for njt_3d in _sample_njts(
device=device, dtype=dtype, requires_grad=requires_grad, dims=[3]
):
# (B, j1, D) x (B, D, E) => (B, j1, E)
if njt_3d._ragged_idx == 1:
B, D = njt_3d.shape[0], njt_3d.shape[-1]
E = D + 2
other = torch.randn(B, D, E, device=device, dtype=dtype)
# used for slicing in unbind_reference()
other._batch_dim = 0
njt_desc = _describe_njt(njt_3d)
yield SampleInput(
_clone(njt_3d),
kwargs={"mat2": other},
name=f"{njt_desc}: (B, j, D) x (B, D, E)",
)
# TODO (need factory functions):
# (B, D, j1) x (B, j1, E) => (B, D, E)
def reference_bmm(op, sample):
# unbind reduces a dim and bmm requires 3D, so use matmul as the reference
matmul_op = copy(op)
matmul_op.op = torch.matmul
# change arg name from mat2 -> other
modified_sample = copy(sample)
other = modified_sample.kwargs["mat2"]
del modified_sample.kwargs["mat2"]
modified_sample.kwargs["other"] = other
return unbind_reference(matmul_op, modified_sample)
def sample_inputs_chunk(op_info, device, dtype, requires_grad, **kwargs):
for sample_input in sample_inputs_unary_dimwise(
op_info, device, dtype, requires_grad, **kwargs
):
# ragged dim chunking: test a single chunks value
if sample_input.kwargs["dim"] == sample_input.input._ragged_idx:
yield _update_sample(sample_input, {"chunks": 3})
# other dim chunking: test different chunks values
else:
D = sample_input.input.size(sample_input.kwargs["dim"])
for chunks in [1, D // 2, D - 1, D]:
yield _update_sample(sample_input, {"chunks": chunks})
def sample_inputs_matmul(
op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs
):
# also run bmm samples through
for sample_input in sample_inputs_bmm(op_info, device, dtype, requires_grad):
# change arg name from mat2 -> other
other = sample_input.kwargs["mat2"]
del sample_input.kwargs["mat2"]
sample_input.kwargs["other"] = other
yield sample_input
# 3D cases not covered by bmm
for njt_3d in _sample_njts(
device=device, dtype=dtype, requires_grad=requires_grad, dims=[3]
):
# (B, j1, D) x (D, E) => (B, j1, E)
if njt_3d._ragged_idx == 1:
D = njt_3d.shape[-1]
E = D + 2
njt_desc = _describe_njt(njt_3d)
yield SampleInput(
_clone(njt_3d),
kwargs={"other": torch.randn(D, E, device=device, dtype=dtype)},
name=f"{njt_desc}: (B, j, D) x (D, E)",
)
# 4D cases
for njt_4d in _sample_njts(
device=device, dtype=dtype, requires_grad=requires_grad, dims=[4]
):
# (B, j1, D, E) x (E, F) => (B, j1, D, F)
if njt_4d._ragged_idx == 1:
E = njt_4d.shape[-1]
F = E + 2
njt_desc = _describe_njt(njt_4d)
yield SampleInput(
_clone(njt_4d),
kwargs={"other": torch.randn(E, F, device=device, dtype=dtype)},
name=f"{njt_desc}: (B, j, D, E) x (E, F)",
)
# Dense x NJT cases
for njt_3d in _sample_njts(
device=device,
dtype=dtype,
requires_grad=requires_grad,
dims=[3],
):
# (B, F, E) x (B, E, j1) => (B, F, j1)
if njt_3d._ragged_idx == 2:
B = njt_3d.shape[0]
E = njt_3d.shape[1]
F = E + 2
njt_desc = _describe_njt(njt_3d)
dense_t = torch.randn(
B, F, E, device=device, dtype=dtype, requires_grad=requires_grad
)
dense_t._batch_dim = 0 # for unbind_reference()
yield SampleInput(
dense_t,
args=(_clone(njt_3d),),
name=f"{njt_desc}: (B, F, E) x (B, E, j1)",
)
# NJT x NJT => Dense case
for njt_3d in _sample_njts(
device=device,
dtype=dtype,
requires_grad=requires_grad,
dims=[3],
):
# (B, E, j1) x (B, j1, F) => (B, E, F)
if njt_3d._ragged_idx == 2 and njt_3d.is_contiguous():
B, E, _ = njt_3d.shape
sum_j1 = len(njt_3d.values())
other_cont = torch.randn(
sum_j1, E + 2, device=device, dtype=dtype, requires_grad=requires_grad
)
other_njt = torch.nested.nested_tensor_from_jagged(
other_cont, njt_3d.offsets(), lengths=njt_3d._lengths
)
njt_desc = _describe_njt(njt_3d)
yield SampleInput(
_clone(njt_3d),
kwargs={"other": _clone(other_njt)},
name=f"{njt_desc}: (B, E, j1) x (B, j1, F)",
)
# TODO (need factory functions):
# (B, j1, D, E) x (B, j1, E, F) => (B, j1, D, F)
def sample_inputs_masked_select(
op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs
):
for njt in _sample_njts(
device=device, dtype=dtype, requires_grad=requires_grad, dims=[2]
):
yield SampleInput(
njt,
kwargs={"mask": (torch.randn_like(njt, requires_grad=False) < 0.0)},
name=_describe_njt(njt),
)
def sample_inputs_narrow(op_info, device, dtype, requires_grad, **kwargs):
for sample_input in sample_inputs_unary_dimwise(
op_info, device, dtype, requires_grad, **kwargs
):
# ragged dim narrowing: test a single start, length value
if sample_input.kwargs["dim"] == sample_input.input._ragged_idx:
yield _update_sample(sample_input, {"start": 1, "length": 2})
# other dim narrowing: test different start, length values
else:
D = sample_input.input.size(sample_input.kwargs["dim"])
for start, length in [(0, D), (0, D - 1), (1, D - 1), (D - 1, 1)]:
yield _update_sample(sample_input, {"start": start, "length": length})
def sample_inputs_nn_functional_embedding(
op_info, device, dtype, requires_grad, **kwargs
):
indices = torch.nested.nested_tensor(
[
torch.tensor([0, 2, 1, 3]),
torch.tensor([4, 2, 1]),
torch.tensor([6, 7, 5, 2, 4]),
],
layout=torch.jagged,
dtype=torch.int64,
device=device,
)
NUM_EMBEDDINGS = 20
EMBEDDING_DIM = 32
weight = torch.randn(NUM_EMBEDDINGS, EMBEDDING_DIM, device=device, dtype=dtype)
# NB: the OpInfo entry for embedding_bag expects weight first so the gradients
# can be checked
yield SampleInput(
_clone(weight).requires_grad_(),
args=(indices,),
)
yield SampleInput(
_clone(weight).requires_grad_(),
args=(indices,),
kwargs={"padding_idx": 1},
)
def sample_inputs_index_put(
op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs
):
for njt in _sample_njts(
device=device, dtype=dtype, requires_grad=requires_grad, dims=[2, 3, 4]
):
for dim in range(njt.dim()):
indices = [
torch.tensor(list(range(njt.size(0))), device=njt.device),
*[
torch.tensor([0] * njt.size(0), device=njt.device)
for _ in range(dim - 1)
],
]
njt_desc = _describe_njt(njt)
yield SampleInput(
_clone(njt),
kwargs={
"indices": indices,
"values": torch.tensor(1.0, device=njt.device),
},
name=f"{njt_desc}: up to dim {dim - 1}",
)
# Non-cont NJT for completeness
offsets = torch.tensor([0, 2, 5, 7], device=device)
lengths = torch.tensor([2, 2, 2], device=device)
indices = [
torch.tensor([0, 1, 2], device=device),
torch.tensor([0, 1, 1], device=device),
torch.tensor([0, 0, 0], device=device),
]
a = torch.nested.nested_tensor_from_jagged(
torch.zeros(7, 3, device=device), offsets, lengths
).requires_grad_(requires_grad)
njt_desc = _describe_njt(a)
yield SampleInput(
_clone(a),
kwargs={"indices": indices, "values": torch.tensor(1.0, device=a.device)},
name=f"{njt_desc}: all dims",
)
def sample_inputs_nn_functional_embedding_bag(
op_info, device, dtype, requires_grad, **kwargs
):
for generate_per_sample_weight in (True, False):
for mode in ("sum", "mean", "max"):
# per_sample_weights is only supported for mode='sum'
if mode != "sum" and generate_per_sample_weight:
continue
NUM_EMBEDDINGS = 10
EMBEDDING_DIM = 32
weight = torch.randn(
NUM_EMBEDDINGS, EMBEDDING_DIM, dtype=dtype, device=device
)
njt = torch.nested.nested_tensor(
[
torch.randint(0, NUM_EMBEDDINGS, size=(2,)),
torch.randint(0, NUM_EMBEDDINGS, size=(3,)),
torch.randint(0, NUM_EMBEDDINGS, size=(4,)),
],
layout=torch.jagged,
dtype=torch.int64,
device=device,
)
per_sample_weights = None
if generate_per_sample_weight:
per_sample_weights = torch.randn_like(njt, dtype=dtype)
# NB: the OpInfo entry for embedding_bag expects weight first so the gradients
# can be checked
yield SampleInput(
weight,
args=(njt,),
kwargs={
"mode": mode,
"per_sample_weights": per_sample_weights,
},
)
def reference_nn_functional_embedding_bag(op, sample):
# run reference on a single bag at a time
new_kwargs = dict(sample.kwargs)
new_kwargs.update(
{"offsets": torch.tensor([0], dtype=torch.int64, device=sample.input.device)}
)
# flip input / weight back to what unbind_reference() expects
sample = SampleInput(sample.args[0], args=(sample.input,), kwargs=new_kwargs)
old_op = op.op
op.op = torch.nn.functional.embedding_bag
output = unbind_reference(op, sample, wrap_output_as_njt=False)
op.op = old_op
# concat bag outputs to get final output
return torch.cat(output, dim=0)
def sample_inputs_nn_functional_linear(op_info, device, dtype, requires_grad, **kwargs):
for njt in _sample_njts(
device=device, dtype=dtype, requires_grad=requires_grad, dims=[3, 4, 5]
):
# projection over a ragged dim is not currently supported
if is_nested_int(njt.size(-1)):
continue
# with bias
NUM_OUTPUT = 10
weight = torch.randn(
NUM_OUTPUT,
njt.size(-1),
device=device,
dtype=dtype,
requires_grad=requires_grad,
)
bias = torch.randn(
NUM_OUTPUT, device=device, dtype=dtype, requires_grad=requires_grad
)
yield SampleInput(
_clone(njt),
kwargs={
"weight": _clone(weight),
"bias": _clone(bias),
},
name=f"{_describe_njt(njt)}: with bias",
)
# without bias
yield SampleInput(
_clone(njt),
kwargs={
"weight": _clone(weight),
},
name=f"{_describe_njt(njt)}: without bias",
)
def sample_inputs_nn_functional_prelu(op_info, device, dtype, requires_grad, **kwargs):
for njt in _sample_njts(
device=device, dtype=dtype, requires_grad=requires_grad, dims=[3, 4]
):
# Second dim is interpreted as number of channels; this should be non-ragged for now
num_channels = njt.size(1)
if is_nested_int(num_channels):
continue
# 1D weight
weight = torch.randn(
num_channels,
device=device,
dtype=dtype,
requires_grad=requires_grad,
)
yield SampleInput(
_clone(njt),
kwargs={
"weight": _clone(weight),
},
name=f"{_describe_njt(njt)}: 1D weight",
)
# scalar tensor weight
yield SampleInput(
_clone(njt),
kwargs={
"weight": torch.tensor(4.2, device=device, dtype=dtype),
},
name=f"{_describe_njt(njt)}: scalar tensor weight",
)
def sample_inputs_nn_functional_rms_norm(
op_info, device, dtype, requires_grad, **kwargs
):
for njt in _sample_njts(
device=device, dtype=dtype, requires_grad=requires_grad, dims=[3, 4]
):
# normalize over non-ragged dims
for start_dim in range(njt.dim()):
if start_dim <= njt._ragged_idx:
continue
normalized_shape = njt.shape[start_dim:]
weight = torch.randn(
normalized_shape,
device=device,
dtype=dtype,
requires_grad=requires_grad,
)
yield SampleInput(
_clone(njt),
kwargs={
"normalized_shape": normalized_shape,
"weight": weight,
},
name=f"{_describe_njt(njt)}",
)
sample_inputs_nn_functional_threshold = partial(
sample_inputs_elementwise_njt_unary,
op_kwargs={"threshold": float.fromhex("0x1.3ap-3"), "value": -9},
)
def sample_inputs_select(op_info, device, dtype, requires_grad, **kwargs):
for sample_input in sample_inputs_unary_dimwise(
op_info, device, dtype, requires_grad, **kwargs
):
        # ragged dim selection: test a single index
if sample_input.kwargs["dim"] == sample_input.input._ragged_idx:
yield _update_sample(sample_input, {"index": 0})
        # other dim selection: test different indices
else:
D = sample_input.input.size(sample_input.kwargs["dim"])
for index in [0, D // 2, D - 1]:
yield _update_sample(sample_input, {"index": index})
def sample_inputs_split(op_info, device, dtype, requires_grad, **kwargs):
for sample_input in sample_inputs_unary_dimwise(
op_info, device, dtype, requires_grad, **kwargs
):
        # ragged dim splitting: test a single split size
if sample_input.kwargs["dim"] == sample_input.input._ragged_idx:
yield _update_sample(sample_input, {"split_size_or_sections": 3})
        # other dim splitting: test different split sizes
else:
D = sample_input.input.size(sample_input.kwargs["dim"])
for split_size in [1, D // 2, D - 1, D]:
yield _update_sample(
sample_input, {"split_size_or_sections": split_size}
)
def sample_inputs_split_with_sizes(op_info, device, dtype, requires_grad, **kwargs):
for sample_input in sample_inputs_unary_dimwise(
op_info, device, dtype, requires_grad, **kwargs
):
# It will never make sense to operate on the ragged dim.
# TODO: Handle this with error_inputs
if sample_input.kwargs["dim"] == sample_input.input._ragged_idx:
continue
D = sample_input.input.size(sample_input.kwargs["dim"])
# splits should add up to D
split1 = torch.randint(0, D - 1, size=()).item()
split2 = D - split1
yield _update_sample(sample_input, {"split_sizes": [split1, split2]})
def sample_inputs_squeeze(op_info, device, dtype, requires_grad, **kwargs):
# squeeze-specific NJT generator (need to ensure there are some 1s in the shape)
def _get_njts():
njt = random_nt_from_dims(
(4, None, 1, 3, 1),
device=device,
dtype=dtype,
requires_grad=requires_grad,
layout=torch.jagged,
)
yield njt
# without min / max seqlen cached
values = njt.values().detach().clone()
offsets = njt.offsets().detach().clone()
yield torch.nested.nested_tensor_from_jagged(values, offsets)
# non-contiguous transposed
yield njt.transpose(1, 3)
# non-contiguous with holes
values = njt.values().detach().clone()
offsets = njt.offsets().detach().clone()
# subtract 1 to cause holes
lengths = (offsets.diff() - 1).detach().clone()
yield torch.nested.nested_tensor_from_jagged(
values=values,
offsets=offsets,
lengths=lengths,
)
for njt in _get_njts():
# single dim operation
for dim in range(njt.dim()):
# Operation on batch / ragged dim is never expected to work.
# TODO: Handle these via error_inputs.
if dim == 0 or dim == njt._ragged_idx:
continue
yield SampleInput(
_clone(njt),
kwargs={"dim": dim},
name=f"{_describe_njt(njt)}: {_describe_dim(njt, dim)}",
)
# multiple dim operation (pass no args)
yield SampleInput(
_clone(njt),
kwargs={"dim": dim},
name=f"{_describe_njt(njt)}: multiple dims",
)
def sample_inputs_unflatten(op_info, device, dtype, requires_grad, **kwargs):
for sample_input in sample_inputs_unary_dimwise(
op_info, device, dtype, requires_grad, **kwargs
):
# It will never make sense to operate on the ragged dim.
# TODO: Handle this with error_inputs
if sample_input.kwargs["dim"] == sample_input.input._ragged_idx:
continue
D = sample_input.input.size(sample_input.kwargs["dim"])
# sizes should multiply to be D
yield _update_sample(sample_input, {"sizes": [D, 1]})
yield _update_sample(sample_input, {"sizes": [1, D]})
if D % 2 == 0:
yield _update_sample(sample_input, {"sizes": [D // 2, 2]})
yield _update_sample(sample_input, {"sizes": [2, D // 2]})
def sample_inputs_unsqueeze(op_info, device, dtype, requires_grad, **kwargs):
for sample_input in sample_inputs_unary_dimwise(
op_info, device, dtype, requires_grad, **kwargs
):
yield sample_input
last_dim_sample = _update_sample(sample_input, {"dim": -1})
last_dim_sample.name = (
f"{_describe_njt(last_dim_sample.input)}: add dim to the end"
)
# Tell the unbind reference how to canonicalize the dim kwargs
# This is necessary because unsqueeze() allows for a dim after
# the last dim to indicate an unsqueeze at the end.
last_dim_sample.input._ndim = last_dim_sample.input.dim() + 1
yield last_dim_sample
def sample_inputs_where(op_info, device, dtype, requires_grad, **kwargs):
for sample in sample_inputs_elementwise_njt_binary(
op_info, device, dtype, requires_grad, **kwargs
):
other = sample.args[0]
sample.args = ()
sample.kwargs["other"] = other
sample.kwargs["condition"] = sample.input > 0.0
sample.name = sample.name.replace("(", "(NT, ")
yield sample
# === END OP-SPECIFIC SAMPLE INPUTS FUNCS / REFERENCES ===
# Mapping of OpInfo full names -> sample_inputs_funcs, which define the set of sample inputs
# (involving NJTs) to pass to the op. Full name consists of the OpInfo's name and variant name
# separated by a period (e.g. special.polygamma.special_polygamma_n_0). These are necessary
# to specify if they cannot be auto-generated for some reason. Try to keep these sorted
# in alphabetical order!
njt_sample_inputs = {
"bmm": sample_inputs_bmm,
"chunk": sample_inputs_chunk,
"clone": sample_inputs_clone,
"count_nonzero": partial(sample_inputs_njt_reduction, supports_keepdim=False),
"fill": sample_inputs_fill,
**{f"mvlgamma.mvlgamma_p_{p}": sample_inputs_mvl_gamma(p=1) for p in (1, 3, 5)},
"nn.functional.embedding": sample_inputs_nn_functional_embedding,
"nn.functional.embedding_bag": sample_inputs_nn_functional_embedding_bag,
"nn.functional.linear": sample_inputs_nn_functional_linear,
"nn.functional.prelu": sample_inputs_nn_functional_prelu,
"nn.functional.rms_norm": sample_inputs_nn_functional_rms_norm,
"nn.functional.threshold": sample_inputs_nn_functional_threshold,
**{f"polygamma.polygamma_n_{n}": sample_inputs_polygamma_n(n=n) for n in range(5)},
"special.polygamma.special_polygamma_n_0": sample_inputs_special_polygamma_n(n=0),
"to": sample_inputs_to,
"matmul": sample_inputs_matmul,
"masked_select": sample_inputs_masked_select,
"narrow": sample_inputs_narrow,
"index_put": sample_inputs_index_put,
# these two don't have ReductionOpInfo entries
"max.reduction_with_dim": sample_inputs_njt_reduction,
"min.reduction_with_dim": sample_inputs_njt_reduction,
"select": sample_inputs_select,
"split": sample_inputs_split,
"split_with_sizes": sample_inputs_split_with_sizes,
"squeeze": sample_inputs_squeeze,
"unflatten": sample_inputs_unflatten,
"unsqueeze": sample_inputs_unsqueeze,
"where": sample_inputs_where,
}
njt_references = {
"bmm": reference_bmm,
"chunk": partial(
unary_dimwise_reference, batchwise_reference=batchwise_reference_chunk
),
"count_nonzero": reduction_reference,
# these two don't have ReductionOpInfo entries
"max.reduction_with_dim": reduction_reference,
"min.reduction_with_dim": reduction_reference,
"narrow": partial(
unary_dimwise_reference, batchwise_reference=batchwise_reference_narrow
),
"select": partial(
unary_dimwise_reference, batchwise_reference=batchwise_reference_select
),
"split": partial(
unary_dimwise_reference, batchwise_reference=batchwise_reference_split
),
"split_with_sizes": partial(
unary_dimwise_reference,
batchwise_reference=batchwise_reference_split_with_sizes,
),
"squeeze": unbind_reference,
"nn.functional.embedding_bag": reference_nn_functional_embedding_bag,
"unflatten": partial(
unary_dimwise_reference, batchwise_reference=batchwise_reference_unflatten
),
"unsqueeze": partial(
unary_dimwise_reference, batchwise_reference=batchwise_reference_unsqueeze
),
}
# Translates an OpInfo entry to one that operates on NJTs.
def translate_opinfo(op):
new_op = copy(op)
new_op.supports_njt = True
# add some extra info for use in generating tests on the right subset of ops
new_op._extra_op_data = extra_op_data.get(op.full_name, ExtraOpData())
if op.full_name in njt_sample_inputs:
new_op.sample_inputs_func = njt_sample_inputs[op.full_name]
new_op.ref = njt_references.get(op.full_name, unbind_reference)
elif isinstance(op, UnaryUfuncInfo):
new_op.sample_inputs_func = partial(
sample_inputs_elementwise_njt_unary, op_kwargs=None
)
new_op.ref = unbind_reference
elif isinstance(op, BinaryUfuncInfo):
new_op.sample_inputs_func = partial(
sample_inputs_elementwise_njt_binary, op_kwargs=None
)
new_op.ref = unbind_reference
elif isinstance(op, ReductionOpInfo):
new_op.sample_inputs_func = partial(sample_inputs_njt_reduction, op_kwargs=None)
new_op.ref = reduction_reference
# TODO: Translate the rest of the OpInfos
else:
new_op.sample_inputs_func = unsupported_sample_inputs_func(op.full_name)
new_op.ref = unsupported_reference(op.full_name)
new_op.supports_njt = False
return new_op
njt_op_db = [translate_opinfo(op) for op in op_db]
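# Illustrative sketch (not part of the original file): a minimal example of how the translated
# entries above could be consumed by a hypothetical test loop. Only the sample_inputs_func
# signature and the supports_njt / ref fields are taken from translate_opinfo(); everything
# else here is an assumption.
def _example_consume_njt_op_db(device="cpu", dtype=torch.float32):
    for op in njt_op_db:
        if not op.supports_njt:
            continue
        # sample_inputs_func uses the (op_info, device, dtype, requires_grad) signature above
        for sample in op.sample_inputs_func(op, device, dtype, requires_grad=False):
            # a real test would run the op on sample.input / sample.args / sample.kwargs and
            # compare against the op's reference (op.ref); this sketch only iterates samples
            _ = sample
        break  # keep the sketch cheap: stop after the first supported op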
```
|
============================================================================================================================================
SOURCE CODE FILE: signal.py
LINES: 1
SIZE: 15.43 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\opinfo\definitions\signal.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import unittest
from functools import partial
from itertools import product
from typing import Callable
import numpy
import torch
from torch.testing._internal.common_dtype import floating_types
from torch.testing._internal.common_utils import TEST_SCIPY
from torch.testing._internal.opinfo.core import (
DecorateInfo,
ErrorInput,
OpInfo,
SampleInput,
)
if TEST_SCIPY:
import scipy.signal
def sample_inputs_window(op_info, device, dtype, requires_grad, *args, **kwargs):
r"""Base function used to create sample inputs for windows.
    Pass any additional required positional arguments via *args and any
    additional keyword arguments via **kwargs.
"""
# Remove include_conjugated_inputs from kwargs
kwargs.pop("include_conjugated_inputs", None)
# Tests window sizes up to 5 samples.
for size, sym in product(range(6), (True, False)):
yield SampleInput(
size,
*args,
sym=sym,
device=device,
dtype=dtype,
requires_grad=requires_grad,
**kwargs,
)
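# Illustrative sketch (not part of the original file): each SampleInput yielded above maps
# directly onto a window constructor call such as torch.signal.windows.hamming. The concrete
# size/dtype/device values below are assumptions for the example only.
def _example_consume_window_sample():
    sample = SampleInput(5, sym=True, dtype=torch.float64, device="cpu")
    # SampleInput.input holds the window length; the remaining settings land in sample.kwargs
    return torch.signal.windows.hamming(sample.input, **sample.kwargs)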
def reference_inputs_window(op_info, device, dtype, requires_grad, *args, **kwargs):
r"""Reference inputs function to use for windows which have a common signature, i.e.,
window size and sym only.
Implement other special functions for windows that have a specific signature.
See exponential and gaussian windows for instance.
"""
yield from sample_inputs_window(
op_info, device, dtype, requires_grad, *args, **kwargs
)
cases = (8, 16, 32, 64, 128, 256)
for size in cases:
yield SampleInput(size, sym=False)
yield SampleInput(size, sym=True)
def reference_inputs_exponential_window(
op_info, device, dtype, requires_grad, **kwargs
):
yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs)
cases = (
(8, {"center": 4, "tau": 0.5}),
(16, {"center": 8, "tau": 2.5}),
(32, {"center": 16, "tau": 43.5}),
(64, {"center": 20, "tau": 3.7}),
(128, {"center": 62, "tau": 99}),
(256, {"tau": 10}),
)
for size, kw in cases:
yield SampleInput(size, sym=False, **kw)
kw["center"] = None
yield SampleInput(size, sym=True, **kw)
def reference_inputs_gaussian_window(op_info, device, dtype, requires_grad, **kwargs):
yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs)
cases = (
(8, {"std": 0.1}),
(16, {"std": 1.2}),
(32, {"std": 2.1}),
(64, {"std": 3.9}),
(128, {"std": 4.5}),
(256, {"std": 10}),
)
for size, kw in cases:
yield SampleInput(size, sym=False, **kw)
yield SampleInput(size, sym=True, **kw)
def reference_inputs_kaiser_window(op_info, device, dtype, requires_grad, **kwargs):
yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs)
cases = (
(8, {"beta": 2}),
(16, {"beta": 12}),
(32, {"beta": 30}),
(64, {"beta": 35}),
(128, {"beta": 41.2}),
(256, {"beta": 100}),
)
for size, kw in cases:
yield SampleInput(size, sym=False, **kw)
yield SampleInput(size, sym=True, **kw)
def reference_inputs_general_cosine_window(
op_info, device, dtype, requires_grad, **kwargs
):
yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs)
cases = (
(8, {"a": [0.5, 0.5]}),
(16, {"a": [0.46, 0.54]}),
(32, {"a": [0.46, 0.23, 0.31]}),
(64, {"a": [0.5]}),
(128, {"a": [0.1, 0.8, 0.05, 0.05]}),
(256, {"a": [0.2, 0.2, 0.2, 0.2, 0.2]}),
)
for size, kw in cases:
yield SampleInput(size, sym=False, **kw)
yield SampleInput(size, sym=True, **kw)
def reference_inputs_general_hamming_window(
op_info, device, dtype, requires_grad, **kwargs
):
yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs)
cases = (
(8, {"alpha": 0.54}),
(16, {"alpha": 0.5}),
(32, {"alpha": 0.23}),
(64, {"alpha": 0.8}),
(128, {"alpha": 0.9}),
(256, {"alpha": 0.05}),
)
for size, kw in cases:
yield SampleInput(size, sym=False, **kw)
yield SampleInput(size, sym=True, **kw)
def error_inputs_window(op_info, device, *args, **kwargs):
# Tests for windows that have a negative size
yield ErrorInput(
SampleInput(-1, *args, dtype=torch.float32, device=device, **kwargs),
error_type=ValueError,
error_regex="requires non-negative window length, got M=-1",
)
# Tests for window tensors that are not torch.strided, for instance, torch.sparse_coo.
yield ErrorInput(
SampleInput(
3,
*args,
layout=torch.sparse_coo,
device=device,
dtype=torch.float32,
**kwargs,
),
error_type=ValueError,
error_regex="is implemented for strided tensors only, got: torch.sparse_coo",
)
# Tests for window tensors that are not floating point dtypes, for instance, torch.long.
yield ErrorInput(
SampleInput(3, *args, dtype=torch.long, device=device, **kwargs),
error_type=ValueError,
error_regex="expects float32 or float64 dtypes, got: torch.int64",
)
# Tests for window tensors that are bfloat16
yield ErrorInput(
SampleInput(3, *args, dtype=torch.bfloat16, device=device, **kwargs),
error_type=ValueError,
error_regex="expects float32 or float64 dtypes, got: torch.bfloat16",
)
# Tests for window tensors that are float16
yield ErrorInput(
SampleInput(3, *args, dtype=torch.float16, device=device, **kwargs),
error_type=ValueError,
error_regex="expects float32 or float64 dtypes, got: torch.float16",
)
def error_inputs_exponential_window(op_info, device, **kwargs):
# Yield common error inputs
yield from error_inputs_window(op_info, device, **kwargs)
# Tests for negative decay values.
yield ErrorInput(
SampleInput(3, tau=-1, dtype=torch.float32, device=device, **kwargs),
error_type=ValueError,
error_regex="Tau must be positive, got: -1 instead.",
)
# Tests for symmetric windows and a given center value.
yield ErrorInput(
SampleInput(3, center=1, sym=True, dtype=torch.float32, device=device),
error_type=ValueError,
error_regex="Center must be None for symmetric windows",
)
def error_inputs_gaussian_window(op_info, device, **kwargs):
# Yield common error inputs
yield from error_inputs_window(op_info, device, std=0.5, **kwargs)
# Tests for negative standard deviations
yield ErrorInput(
SampleInput(3, std=-1, dtype=torch.float32, device=device, **kwargs),
error_type=ValueError,
error_regex="Standard deviation must be positive, got: -1 instead.",
)
def error_inputs_kaiser_window(op_info, device, **kwargs):
# Yield common error inputs
yield from error_inputs_window(op_info, device, beta=12, **kwargs)
# Tests for negative beta
yield ErrorInput(
SampleInput(3, beta=-1, dtype=torch.float32, device=device, **kwargs),
error_type=ValueError,
error_regex="beta must be non-negative, got: -1 instead.",
)
def error_inputs_general_cosine_window(op_info, device, **kwargs):
# Yield common error inputs
yield from error_inputs_window(op_info, device, a=[0.54, 0.46], **kwargs)
    # Tests for invalid coefficient arguments
yield ErrorInput(
SampleInput(3, a=None, dtype=torch.float32, device=device, **kwargs),
error_type=TypeError,
error_regex="Coefficients must be a list/tuple",
)
yield ErrorInput(
SampleInput(3, a=[], dtype=torch.float32, device=device, **kwargs),
error_type=ValueError,
error_regex="Coefficients cannot be empty",
)
def reference_signal_window(fn: Callable):
r"""Wrapper for scipy signal window references.
Discards keyword arguments for window reference functions that don't have a matching signature with
torch, e.g., gaussian window.
"""
def _fn(
*args,
dtype=numpy.float64,
device=None,
layout=torch.strided,
requires_grad=False,
**kwargs,
):
r"""The unused arguments are defined to disregard those values"""
return fn(*args, **kwargs).astype(dtype)
return _fn
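# Illustrative usage sketch (not part of the original file), guarded on scipy availability:
# the wrapper lets a scipy window reference be called with torch-style keyword arguments
# (dtype/device/layout/requires_grad) that the underlying scipy function would reject.
def _example_reference_signal_window():
    if not TEST_SCIPY:
        return None
    ref = reference_signal_window(scipy.signal.windows.gaussian)
    # scipy.signal.windows.gaussian(M, std, sym=True); the torch-only kwargs are discarded
    return ref(8, std=1.92, sym=False, dtype=numpy.float32, device="cpu")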
def make_signal_windows_opinfo(
name: str,
ref: Callable,
sample_inputs_func: Callable,
reference_inputs_func: Callable,
error_inputs_func: Callable,
*,
skips: tuple[DecorateInfo, ...] = (),
):
r"""Helper function to create OpInfo objects related to different windows."""
return OpInfo(
name=name,
ref=ref if TEST_SCIPY else None,
dtypes=floating_types(),
sample_inputs_func=sample_inputs_func,
reference_inputs_func=reference_inputs_func,
error_inputs_func=error_inputs_func,
supports_out=False,
supports_autograd=False,
skips=(
# TODO: same as this?
# https://github.com/pytorch/pytorch/issues/81774
# also see: arange, new_full
# fails to match any schemas despite working in the interpreter
DecorateInfo(
unittest.expectedFailure,
"TestOperatorSignatures",
"test_get_torch_func_signature_exhaustive",
),
# fails to match any schemas despite working in the interpreter
DecorateInfo(
unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
),
            # skip these tests since we have non-tensor input
DecorateInfo(
unittest.skip("Skipped!"), "TestCommon", "test_noncontiguous_samples"
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_variant_consistency_eager",
),
DecorateInfo(unittest.skip("Skipped!"), "TestMathBits", "test_conj_view"),
DecorateInfo(
unittest.skip("Skipped!"), "TestMathBits", "test_neg_conj_view"
),
DecorateInfo(unittest.skip("Skipped!"), "TestMathBits", "test_neg_view"),
DecorateInfo(
unittest.skip("Skipped!"),
"TestVmapOperatorsOpInfo",
"test_vmap_exhaustive",
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestVmapOperatorsOpInfo",
"test_op_has_batch_rule",
),
DecorateInfo(
unittest.skip("Buggy on MPS for now (mistakenly promotes to float64)"),
"TestCommon",
"test_numpy_ref_mps",
),
*skips,
),
)
op_db: list[OpInfo] = [
make_signal_windows_opinfo(
name="signal.windows.hamming",
ref=reference_signal_window(scipy.signal.windows.hamming)
if TEST_SCIPY
else None,
sample_inputs_func=sample_inputs_window,
reference_inputs_func=reference_inputs_window,
error_inputs_func=error_inputs_window,
),
make_signal_windows_opinfo(
name="signal.windows.hann",
ref=reference_signal_window(scipy.signal.windows.hann) if TEST_SCIPY else None,
sample_inputs_func=sample_inputs_window,
reference_inputs_func=reference_inputs_window,
error_inputs_func=error_inputs_window,
),
make_signal_windows_opinfo(
name="signal.windows.bartlett",
ref=reference_signal_window(scipy.signal.windows.bartlett)
if TEST_SCIPY
else None,
sample_inputs_func=sample_inputs_window,
reference_inputs_func=reference_inputs_window,
error_inputs_func=error_inputs_window,
),
make_signal_windows_opinfo(
name="signal.windows.blackman",
ref=reference_signal_window(scipy.signal.windows.blackman)
if TEST_SCIPY
else None,
sample_inputs_func=sample_inputs_window,
reference_inputs_func=reference_inputs_window,
error_inputs_func=error_inputs_window,
),
make_signal_windows_opinfo(
name="signal.windows.cosine",
ref=reference_signal_window(scipy.signal.windows.cosine)
if TEST_SCIPY
else None,
sample_inputs_func=sample_inputs_window,
reference_inputs_func=reference_inputs_window,
error_inputs_func=error_inputs_window,
),
make_signal_windows_opinfo(
name="signal.windows.exponential",
ref=reference_signal_window(scipy.signal.windows.exponential)
if TEST_SCIPY
else None,
sample_inputs_func=partial(sample_inputs_window, tau=2.78),
reference_inputs_func=partial(reference_inputs_exponential_window, tau=2.78),
error_inputs_func=error_inputs_exponential_window,
),
make_signal_windows_opinfo(
name="signal.windows.gaussian",
ref=reference_signal_window(scipy.signal.windows.gaussian)
if TEST_SCIPY
else None,
sample_inputs_func=partial(sample_inputs_window, std=1.92),
reference_inputs_func=partial(reference_inputs_gaussian_window, std=1.92),
error_inputs_func=error_inputs_gaussian_window,
skips=(
DecorateInfo(
unittest.skip("Buggy on MPS for now (mistakenly promotes to float64)"),
"TestCommon",
"test_numpy_ref_mps",
),
),
),
make_signal_windows_opinfo(
name="signal.windows.kaiser",
ref=reference_signal_window(scipy.signal.windows.kaiser)
if TEST_SCIPY
else None,
sample_inputs_func=partial(sample_inputs_window, beta=12.0),
reference_inputs_func=partial(reference_inputs_kaiser_window, beta=12.0),
error_inputs_func=error_inputs_kaiser_window,
),
make_signal_windows_opinfo(
name="signal.windows.general_cosine",
ref=reference_signal_window(scipy.signal.windows.general_cosine)
if TEST_SCIPY
else None,
sample_inputs_func=partial(sample_inputs_window, a=[0.54, 0.46]),
reference_inputs_func=partial(
reference_inputs_general_cosine_window, a=[0.54, 0.46]
),
error_inputs_func=error_inputs_general_cosine_window,
),
make_signal_windows_opinfo(
name="signal.windows.general_hamming",
ref=reference_signal_window(scipy.signal.windows.general_hamming)
if TEST_SCIPY
else None,
sample_inputs_func=partial(sample_inputs_window, alpha=0.54),
reference_inputs_func=partial(
reference_inputs_general_hamming_window, alpha=0.54
),
error_inputs_func=error_inputs_window,
),
make_signal_windows_opinfo(
name="signal.windows.nuttall",
ref=reference_signal_window(scipy.signal.windows.nuttall)
if TEST_SCIPY
else None,
sample_inputs_func=sample_inputs_window,
reference_inputs_func=reference_inputs_window,
error_inputs_func=error_inputs_window,
),
]
```
|
============================================================================================================================================
SOURCE CODE FILE: sparse.py
LINES: 1
SIZE: 33.89 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\opinfo\definitions\sparse.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import os
import torch
from torch.testing import make_tensor # noqa: F401
from torch.testing._internal.opinfo.core import ( # noqa: F401
BinaryUfuncInfo,
ErrorInput,
generate_elementwise_binary_tensors,
ReductionOpInfo,
sample_inputs_reduction,
SampleInput,
)
def _check_validate(op_info, sample):
def _check_fail(sample):
try:
op_info(
sample.sample_input.input,
*sample.sample_input.args,
**sample.sample_input.kwargs,
)
except sample.error_type:
pass
except Exception as msg:
raise AssertionError( # noqa: B904
f"{op_info.name} on {sample.sample_input=} expected exception "
f"{sample.error_type}: {sample.error_regex}, got {type(msg).__name__}: {msg}"
)
else:
raise AssertionError(
f"{op_info.name} on {sample.sample_input=} expected exception "
f"{sample.error_type}: {sample.error_regex}, got none."
)
def _check_success(sample):
try:
op_info(sample.input, *sample.args, **sample.kwargs)
except Exception as msg:
raise AssertionError( # noqa: B904
f"{op_info.name} on {sample=} expected to succeed "
f", got {type(msg).__name__}: {msg}"
)
if isinstance(sample, ErrorInput):
_check_fail(sample)
else:
_check_success(sample)
def _sample_inputs_sparse(
sample_inputs,
maybe_failing_sample_inputs,
validate_sample_input,
op_info,
*args,
**kwargs,
):
check_validate = (
os.environ.get("PYTORCH_TEST_CHECK_VALIDATE_SPARSE_SAMPLES", "0") == "1"
)
for sample in sample_inputs(op_info, *args, **kwargs):
sample = validate_sample_input(op_info, sample, check_validate=check_validate)
if isinstance(sample, SampleInput):
yield sample
# Error inputs are handled in error_inputs_sparse
for sample in maybe_failing_sample_inputs(op_info, *args, **kwargs):
sample = validate_sample_input(op_info, sample, check_validate=check_validate)
if isinstance(sample, SampleInput):
yield sample
def _error_inputs_sparse(
maybe_failing_sample_inputs, validate_sample_input, op_info, *args, **kwargs
):
check_validate = (
os.environ.get("PYTORCH_TEST_CHECK_VALIDATE_SPARSE_SAMPLES", "0") == "1"
)
for sample in maybe_failing_sample_inputs(op_info, *args, **kwargs):
sample = validate_sample_input(op_info, sample, check_validate=check_validate)
if isinstance(sample, ErrorInput):
yield sample
# Sample inputs are handled in sample_inputs_sparse
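# Illustrative note (not part of the original file): the extra validation pass used by both
# helpers above is gated purely by an environment variable; this hypothetical helper mirrors
# that check.
def _example_check_validate_enabled():
    return os.environ.get("PYTORCH_TEST_CHECK_VALIDATE_SPARSE_SAMPLES", "0") == "1"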
def _apply_requires_grad_to_samples(sample_inputs):
"""Decorator to _maybe_failing_sample_inputs_... generator functions
that clones and sets requires_grad argument to tensors in sample
input arguments. This is needed when the generated samples share
tensor instances.
"""
def wrapper(op_info, device, dtype, requires_grad, layout, **kwargs):
def apply_requires_grad(x):
if (
not isinstance(x, torch.Tensor)
or x.requires_grad
or not requires_grad
or not (x.is_floating_point() or x.is_complex())
):
return x
return x.detach().clone().requires_grad_(requires_grad)
if requires_grad:
for sample_input in sample_inputs(
op_info, device, dtype, requires_grad, layout, **kwargs
):
yield sample_input.transform(apply_requires_grad)
else:
yield from sample_inputs(
op_info, device, dtype, requires_grad, layout, **kwargs
)
return wrapper
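# Illustrative sketch (not part of the original file): a hypothetical generator whose samples
# share one tensor instance; wrapping it with the decorator above clones the floating-point
# tensors and enables requires_grad on the clones only when requested.
@_apply_requires_grad_to_samples
def _example_shared_tensor_samples(op_info, device, dtype, requires_grad, layout, **kwargs):
    shared = torch.ones(2, 2, device=device, dtype=dtype)
    yield SampleInput(shared, args=(shared,))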
def sample_inputs_sparse_reduction(
op_info, device, dtype, requires_grad, layout, blocksize=None, **kwargs
):
"""Sample inputs for reduction operations on sparse tensors."""
layout_name = str(layout).split(".", 1)[-1].rsplit("_coo", 1)[0]
op_supports_layout = getattr(op_info, "supports_" + layout_name)
if not op_supports_layout:
return
for sample_input in sample_inputs_reduction(
op_info, device, dtype, requires_grad, **kwargs
):
if sample_input.input.ndim == 0:
# scalar sparse tensors are not supported
continue
if layout in {
torch.sparse_csr,
torch.sparse_csc,
torch.sparse_bsr,
torch.sparse_bsc,
}:
if sample_input.input.ndim < 2:
# conversion to sparse compressed tensors requires at
# least 2 dimensional tensors
continue
if sample_input.input.ndim > 2 and (sample_input.input == 0).any():
# Skip batched sparse compressed samples that contain
# explicit zeros because to_sparse(layout=..) will
# fail, see gh-98495.
# TODO: remove this if-block after gh-98495 is fixed.
continue
if layout in {torch.sparse_bsr, torch.sparse_bsc} and blocksize is None:
blocksize = (1, 1)
yield SampleInput(
sample_input.input.detach()
.to_sparse(layout=layout, blocksize=blocksize)
.requires_grad_(requires_grad),
args=sample_input.args,
kwargs=sample_input.kwargs,
)
if layout is torch.sparse_coo and (dtype.is_floating_point or dtype.is_complex):
# uncoalesced samples
inp = sample_input.input.detach().to_sparse(layout=layout)
inp = torch.sparse_coo_tensor(
inp.indices().repeat(1, 2),
inp.values().repeat(2),
inp.shape,
dtype=inp.dtype,
device=inp.device,
)
assert not inp.is_coalesced()
yield SampleInput(
inp.requires_grad_(requires_grad),
args=sample_input.args,
kwargs=sample_input.kwargs,
)
if sample_input.input.ndim > 2:
# hybrid samples
yield SampleInput(
sample_input.input.detach()
.to_sparse(
layout=layout,
blocksize=blocksize,
dense_dim=sample_input.input.ndim - 2,
)
.requires_grad_(requires_grad),
args=sample_input.args,
kwargs=sample_input.kwargs,
)
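# Illustrative sketch (not part of the original file): the core transformation above is a
# dense -> sparse conversion per layout, where blocksize only applies to BSR/BSC layouts.
# The tensor values here are assumptions for the example only.
def _example_dense_to_sparse_layout(layout=torch.sparse_csr):
    dense = torch.tensor([[1.0, 0.0], [0.0, 2.0]])
    blocksize = (1, 1) if layout in {torch.sparse_bsr, torch.sparse_bsc} else None
    return dense.to_sparse(layout=layout, blocksize=blocksize)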
def _validate_sample_input_sparse_reduction(op_info, sample, check_validate=False):
"""Return the specified sample when it is valid and supported by the
    operation. Otherwise, return the sample as an ErrorInput instance.
    When check_validate is True, the result is validated by calling the
    op on the sample.
"""
UNSPECIFIED = object()
if op_info.name == "sum":
sample = _validate_sample_input_sparse_reduction_sum(sample)
if op_info.name in {"masked.sum"}:
mask = sample.kwargs.get("mask", UNSPECIFIED)
if (
mask not in {None, UNSPECIFIED}
and mask.ndim > 2
and mask.layout is torch.strided
and (mask == 0).any()
):
# TODO: remove this if-block after gh-98495 is fixed.
sample = ErrorInput(
sample,
error_regex="Expect the same number of specified elements per batch.",
)
elif not sample.kwargs.get("keepdim"):
sample = ErrorInput(
sample,
error_type=(AssertionError, RuntimeError),
error_regex="reduction operations on (CSR|CSC) tensors with keepdim=False is unsupported",
)
elif mask is UNSPECIFIED:
sample = ErrorInput(
sample,
error_type=ValueError,
error_regex="masked (.*) expects explicit mask for sparse_csr tensor input",
)
elif sample.input.ndim > 2:
sample = ErrorInput(
sample,
error_regex="crow_indices is supposed to be a vector, but got 3 dimensional tensor.",
)
if op_info.name in {"masked.amax", "masked.amin", "masked.mean", "masked.prod"}:
t_inp = sample.input
mask = sample.kwargs.get("mask")
if (
mask is not None
and mask.ndim > 2
and mask.layout is torch.strided
and (mask == 0).any()
):
# TODO: remove this if-block after gh-98495 is fixed.
sample = ErrorInput(
sample,
error_regex="Expect the same number of specified elements per batch.",
)
elif mask is None:
sample = ErrorInput(
sample,
error_type=ValueError,
error_regex="masked (.*) expects explicit mask for sparse_csr tensor input",
)
elif (
mask.layout is sample.input.layout
and mask.ndim > 2
and op_info.name == "masked.mean"
):
sample = ErrorInput(
sample,
error_type=TypeError,
error_regex=(
"where[(][)] received an invalid combination of arguments"
" - got [(]Tensor, Tensor, NoneType[)]"
),
)
elif not sample.kwargs.get("keepdim"):
sample = ErrorInput(
sample,
error_type=(AssertionError, RuntimeError),
error_regex="reduction operations on (CSR|CSC) tensors with keepdim=False is unsupported",
)
elif (
sample.input.ndim > 2
and (sample.kwargs.get("dim") not in {0, 1})
and mask.ndim > 2
and mask.layout is not torch.strided
):
if sample.kwargs.get("dim") == (0, -1):
sample = ErrorInput(
sample,
error_regex="tensor dimensionality must be sum of batch, base, and dense dimensionalities",
)
elif op_info.name == "masked.prod":
sample = ErrorInput(
sample,
error_regex="input_dim == 2 INTERNAL ASSERT FAILED at",
)
else:
sample = ErrorInput(
sample,
error_type=AssertionError,
error_regex="Sparse CSR tensors are 2D and only support reduction along dim 0 or 1.",
)
elif sample.input.ndim > 2:
sample = ErrorInput(
sample,
error_regex="crow_indices is supposed to be a vector, but got 3 dimensional tensor.",
)
elif (
mask.layout is t_inp.layout
and mask._nnz() != t_inp._nnz()
and t_inp.dense_dim() > 0
):
sample = ErrorInput(
sample,
error_regex="Index tensor must have the same number of dimensions as src tensor",
)
if check_validate:
_check_validate(op_info, sample)
return sample
def _validate_sample_input_sparse_reduction_sum(sample, check_validate=False):
# NOTE: When fixing a failing sample case, remove the
# corresponding if-block
t_inp, t_kwargs = sample.input, sample.kwargs
dim = t_kwargs.get("dim")
keepdim = t_kwargs.get("keepdim")
layout = t_inp.layout
if isinstance(dim, (int, list, tuple)):
if layout in {
torch.sparse_csr,
torch.sparse_csc,
torch.sparse_bsr,
torch.sparse_bsc,
}:
if layout in {torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}:
return ErrorInput(
sample,
error_regex=(
"Currently the only compressed sparse format supported for sum.dim_IntList is CSR, but got layout"
),
)
if layout in {torch.sparse_csr, torch.sparse_csc} and not keepdim:
return ErrorInput(
sample,
error_regex=(
"reduction operations on CSR tensors with keepdim=False is unsupported"
),
)
if t_inp.dim() != 2:
return ErrorInput(
sample,
error_regex=("input_dim == 2 INTERNAL ASSERT"),
)
if layout == torch.sparse_csr:
if t_inp.dtype == torch.bool:
return ErrorInput(
sample,
error_regex=("_sparse_csr_sum_cpu not implemented for 'Bool'"),
)
if t_inp.dtype == torch.complex32:
return ErrorInput(
sample,
error_regex=(
"_sparse_csr_sum_cuda not implemented for 'ComplexHalf'"
),
)
return sample
def _maybe_failing_sample_inputs_sparse_reduction_sum(
op_info, device, dtype, requires_grad, layout, **kwargs
):
"""Generator of samples that are known to fail or that were failing in past."""
# NOTE: When fixing a failing case, remove the Exception comment
# but keep the `yield sample` statement.
if layout in [
torch.sparse_csr,
torch.sparse_csc,
]:
# NotImplementedError: Could not run 'aten::sum.IntList_out' with arguments from the 'SparseCsrCPU' backend.
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype)
.to_sparse(layout=layout)
.requires_grad_(requires_grad),
kwargs=dict(dim=0, keepdim=True),
)
yield SampleInput(
torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype)
.to_sparse(layout=layout, dense_dim=1)
.requires_grad_(requires_grad),
kwargs=dict(dim=0),
)
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype)
.to_sparse(layout=layout)
.requires_grad_(requires_grad),
kwargs=dict(dim=(0,)),
)
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype)
.to_sparse(layout=layout)
.requires_grad_(requires_grad),
kwargs=dict(dim=(0,), keepdim=True),
)
yield SampleInput(
torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype)
.to_sparse(layout=layout, dense_dim=1)
.requires_grad_(requires_grad),
kwargs=dict(dim=(0,)),
)
# RuntimeError: torch.empty: Only batched sparse compressed (non-block) tensors are supported, but got size [2]
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype)
.to_sparse(layout=layout)
.requires_grad_(requires_grad),
kwargs=dict(dim=0),
)
if layout in [
torch.sparse_bsr,
torch.sparse_bsc,
]:
# RuntimeError: empty_sparse_compressed expected sparse compressed (non-block) tensor layout but got SparseBsr
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype)
.to_sparse(layout=layout, blocksize=(2, 2))
.requires_grad_(requires_grad),
kwargs=dict(dim=0, keepdim=True),
)
yield SampleInput(
torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype)
.to_sparse(layout=layout, dense_dim=1, blocksize=(1, 1))
.requires_grad_(requires_grad),
kwargs=dict(dim=0),
)
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype)
.to_sparse(layout=layout, blocksize=(1, 1))
.requires_grad_(requires_grad),
kwargs=dict(dim=(0,)),
)
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype)
.to_sparse(layout=layout, blocksize=(1, 1))
.requires_grad_(requires_grad),
kwargs=dict(dim=(0,), keepdim=True),
)
yield SampleInput(
torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype)
.to_sparse(layout=layout, blocksize=(1, 1), dense_dim=1)
.requires_grad_(requires_grad),
kwargs=dict(dim=(0,)),
)
# RuntimeError: torch.empty: Only batched sparse compressed (non-block) tensors are supported, but got size [2]
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype)
.to_sparse(layout=layout, blocksize=(1, 1))
.requires_grad_(requires_grad),
kwargs=dict(dim=0),
)
def sample_inputs_sparse_reduction_sum(
op_info, device, dtype, requires_grad, layout, **kwargs
):
"""Sample inputs for sum on sparse tensors."""
yield from _sample_inputs_sparse(
sample_inputs_sparse_reduction,
_maybe_failing_sample_inputs_sparse_reduction_sum,
_validate_sample_input_sparse_reduction,
op_info,
device,
dtype,
requires_grad,
layout,
**kwargs,
)
def error_inputs_sparse_reduction_sum(op_info, device, layout, **kwargs):
"""Error inputs for sum on sparse tensors."""
dtype = torch.float64
requires_grad = False
yield from _error_inputs_sparse(
_maybe_failing_sample_inputs_sparse_reduction_sum,
_validate_sample_input_sparse_reduction,
op_info,
device,
dtype,
requires_grad,
layout,
**kwargs,
)
def sample_inputs_sparse_elementwise_binary_operation(
op_info, device, dtype, requires_grad, layout, **kwargs
):
"""Sample inputs for elementwise binary operations on sparse tensors.
The samples include regular, zero-sized, batched, and hybrid
sparse tensors as well as rhs scalars. All tensors are full tensors.
"""
def _to_sparse(tensor, **kwargs):
return tensor.detach().to_sparse(**kwargs).requires_grad_(requires_grad)
for sample_input in generate_elementwise_binary_tensors(
op_info,
device=device,
dtype=dtype,
requires_grad=requires_grad,
exclude_zero=True,
**kwargs,
):
lhs, rhs = sample_input.input, sample_input.args[0]
min_dense_dim = 0
max_dense_dim = lhs.ndim - 1
if layout in {
torch.sparse_csr,
torch.sparse_csc,
torch.sparse_bsr,
torch.sparse_bsc,
}:
if lhs.ndim < 2:
# sparse compressed tensors sparse_dim must be 2
continue
max_dense_dim = lhs.ndim - 2
for dense_dim in range(min_dense_dim, max_dense_dim + 1):
if layout in {torch.sparse_bsr, torch.sparse_bsc}:
blocksizes = [(1, 1)]
if lhs.numel() > 0:
blocksizes.append(
(
lhs.shape[lhs.ndim - 2 - dense_dim],
lhs.shape[lhs.ndim - 1 - dense_dim],
)
)
else:
blocksizes = [None]
for blocksize in blocksizes:
to_sparse_kwargs = dict(
layout=layout, dense_dim=dense_dim, blocksize=blocksize
)
lhs_sparse = _to_sparse(lhs, **to_sparse_kwargs)
rhs_sparse = _to_sparse(rhs, **to_sparse_kwargs)
# op(sparse, sparse)
yield SampleInput(
lhs_sparse,
args=(rhs_sparse, *sample_input.args[1:]),
kwargs=sample_input.kwargs,
)
# op(sparse, scalar)
yield SampleInput(
lhs_sparse,
args=(
make_tensor(
(), dtype=dtype, device=device, requires_grad=requires_grad
),
*sample_input.args[1:],
),
kwargs=sample_input.kwargs,
)
def _validate_sample_input_elementwise_binary_sparse_mul(sample):
# NOTE: When fixing a failing sample case, remove the
# corresponding if-block
t_inp, t_args = sample.input, sample.args
batch_dim = t_inp.dim() - t_inp.dense_dim() - t_inp.sparse_dim()
layout = t_inp.layout
dtype = t_inp.dtype
if layout is torch.sparse_csr and batch_dim > 0 and t_args[0].ndim > 0:
return ErrorInput(
sample,
error_regex=(
"coo_to_sparse_csr: conversion from Sparse to SparseCsr for input"
" tensors with sparse_dim[(][)]!=2 is not supported"
),
)
elif layout is torch.sparse_csc and t_args[0].ndim > 0:
return ErrorInput(
sample, error_regex="Expected result Tensor to be of format CSR"
)
elif layout is torch.sparse_bsr and t_args[0].ndim > 0:
return ErrorInput(
sample,
error_regex="empty_sparse_compressed expected sparse compressed [(]non-block[)] tensor layout but got SparseBsr",
)
elif layout is torch.sparse_bsc and t_args[0].ndim > 0:
return ErrorInput(
sample,
error_regex="empty_sparse_compressed expected sparse compressed [(]non-block[)] tensor layout but got SparseBsc",
)
elif (
layout is torch.sparse_coo
and dtype is torch.bool
and t_args[0].ndim > 0
and t_inp.is_cpu
and t_inp.numel() > 0
and t_inp.dense_dim() > 0
):
return ErrorInput(
sample, error_regex="\"addcmul_cpu_out\" not implemented for 'Bool'"
)
elif (
layout in {torch.sparse_coo, torch.sparse_csr}
and dtype is torch.bool
and t_inp._nnz() > 0
and t_args[0].ndim > 0
and t_inp.is_cpu
and t_inp.numel() > 0
):
return ErrorInput(
sample, error_regex="\"mul_out_sparse\" not implemented for 'Bool'"
)
elif (
layout is torch.sparse_csr
and t_args[0].layout is torch.strided
and 0 < t_args[0].ndim
and t_args[0].ndim < t_inp.ndim
):
return ErrorInput(
sample, error_regex="sparse_mask_sparse_csr expects self to be 2D"
)
elif layout is torch.sparse_csr and (
(t_args[0].layout is torch.strided and 0 < t_args[0].ndim)
or (t_args[0].layout is layout and t_inp.shape != t_args[0].shape)
):
return ErrorInput(
sample,
error_regex=(
"expects sparse inputs with equal dimensionality, number of sparse dimensions,"
" and shape of sparse dimensions"
),
)
elif (
layout is torch.sparse_csr
and t_inp.dense_dim() > 0
and t_inp._nnz() > 0
and t_inp.is_cpu
and dtype is torch.float16
and t_args[0].ndim > 0
):
return ErrorInput(
sample, error_regex="\"addcmul_cpu_out\" not implemented for 'Half'"
)
return sample
@_apply_requires_grad_to_samples
def _maybe_failing_sample_inputs_sparse_elementwise_binary_mul(
op_info, device, dtype, requires_grad, layout, **kwargs
):
"""Generator of samples that are known to fail or that were failing in past."""
# NOTE: When fixing a failing case, remove the Exception comment
# but keep the `yield sample` statement.
blocksize = (1, 1) if layout in {torch.sparse_bsr, torch.sparse_bsc} else None
regular = torch.tensor([[1, 2], [3, 4]], device=device, dtype=dtype).to_sparse(
layout=layout, dense_dim=0, blocksize=blocksize
)
batch = torch.tensor(
[[[1, 2], [3, 4]], [[4, 5], [6, 7]]], device=device, dtype=dtype
).to_sparse(layout=layout, dense_dim=0, blocksize=blocksize)
hybrid = torch.tensor(
[[[1], [2]], [[3], [4]]], device=device, dtype=dtype
).to_sparse(layout=layout, dense_dim=1, blocksize=blocksize)
if layout is torch.sparse_csr:
# RuntimeError: crow_indices is supposed to be a vector, but got 2 dimensional tensor
yield SampleInput(batch, args=(batch,))
# RuntimeError: Only tensors with two sparse dimensions can be
# converted to the SparseCsr layout, got self with 3 sparse
# dimensions.
yield SampleInput(
torch.zeros_like(hybrid).requires_grad_(requires_grad),
args=(torch.zeros_like(hybrid).requires_grad_(requires_grad),),
)
if dtype is torch.complex32:
# RuntimeError: "mul_out_sparse" not implemented for 'ComplexHalf'
yield SampleInput(regular, args=(regular,))
if dtype is torch.bool and regular.is_cpu:
# RuntimeError: "mul_out_sparse" not implemented for 'Bool'
yield SampleInput(regular, args=(regular,))
if layout is torch.sparse_csc:
# RuntimeError: Expected result Tensor to be of format CSR
yield SampleInput(regular, args=(regular,))
if layout is torch.sparse_bsr:
# RuntimeError: empty_sparse_compressed expected sparse compressed (non-block) tensor layout but got SparseBsr
yield SampleInput(regular, args=(regular,))
if layout is torch.sparse_bsc:
# RuntimeError: empty_sparse_compressed expected sparse compressed (non-block) tensor layout but got SparseBsc
yield SampleInput(regular, args=(regular,))
if layout is torch.sparse_coo:
if dtype is torch.complex32:
# RuntimeError: "mul_out_sparse" not implemented for 'ComplexHalf'
yield SampleInput(regular, args=(regular,))
if dtype is torch.bool and regular.is_cpu:
# RuntimeError: "mul_out_sparse" not implemented for 'Bool'
yield SampleInput(regular, args=(regular,))
if dtype in {torch.bool, torch.float16} and regular.is_cpu:
# RuntimeError: "addcmul_cpu_out" not implemented for '(Bool|Half)'
yield SampleInput(hybrid, args=(hybrid,))
def _validate_sample_input_sparse_elementwise_binary_operation(
op_info, sample, check_validate=False
):
if op_info.name == "mul":
sample = _validate_sample_input_elementwise_binary_sparse_mul(sample)
if check_validate:
_check_validate(op_info, sample)
return sample
def sample_inputs_sparse_mul(op_info, device, dtype, requires_grad, layout, **kwargs):
"""Sample inputs for mul operation on sparse tensors."""
yield from _sample_inputs_sparse(
sample_inputs_sparse_elementwise_binary_operation,
_maybe_failing_sample_inputs_sparse_elementwise_binary_mul,
_validate_sample_input_sparse_elementwise_binary_operation,
op_info,
device,
dtype,
requires_grad,
layout,
**kwargs,
)
def error_inputs_sparse_mul(op_info, device, layout, **kwargs):
"""Error inputs for mul operation on sparse tensors."""
dtype = torch.float64
requires_grad = False
yield from _error_inputs_sparse(
_maybe_failing_sample_inputs_sparse_elementwise_binary_mul,
_validate_sample_input_sparse_elementwise_binary_operation,
op_info,
device,
dtype,
requires_grad,
layout,
**kwargs,
)
def _sample_inputs_sparse_like_fns(
op_info, device, dtype, requires_grad, layout, **kwargs
):
from torch.testing._internal.common_utils import TestCase
for tensor in TestCase().generate_simple_inputs(
layout,
device=device,
dtype=dtype,
enable_batch=True,
enable_hybrid=True,
enable_zero_sized=True,
enable_non_contiguous_indices=False,
enable_non_contiguous_values=False,
):
yield SampleInput(tensor, args=(), kwargs={})
yield SampleInput(
tensor, args=(), kwargs=dict(device=device, dtype=dtype, layout=layout)
)
if dtype is not torch.float64:
yield SampleInput(tensor, args=(), kwargs=dict(dtype=torch.float64))
if torch.cuda.is_available():
other_device = "cuda" if tensor.device.type == "cpu" else "cpu"
yield SampleInput(tensor, args=(), kwargs=dict(device=other_device))
if layout is torch.sparse_csr:
other_layout = torch.sparse_csc
elif layout is torch.sparse_csc:
other_layout = torch.sparse_csr
elif layout is torch.sparse_bsr:
other_layout = torch.sparse_bsc
elif layout is torch.sparse_bsc:
other_layout = torch.sparse_bsr
else:
other_layout = torch.strided
yield SampleInput(tensor, args=(), kwargs=dict(layout=other_layout))
if layout is not torch.sparse_coo:
yield SampleInput(tensor, args=(), kwargs=dict(layout=torch.sparse_coo))
def _validate_sample_input_sparse_like_fns(op_info, sample, check_validate=False):
if sample.input.layout in {
torch.sparse_csr,
torch.sparse_csc,
torch.sparse_bsr,
torch.sparse_bsc,
} and op_info.name not in {"zeros_like"}:
if sample.kwargs.get("layout", sample.input.layout) != sample.input.layout:
return ErrorInput(
sample,
error_regex=(
"empty_like with different sparse layout is not supported"
" \\(self is Sparse(Csc|Csr|Bsc|Bsr) but you requested Sparse(Csr|Csc|Bsr|Bsc)\\)"
),
)
if sample.input.layout is torch.sparse_coo:
return ErrorInput(
sample,
error_regex=(
"Could not run 'aten::normal_' with arguments from the 'Sparse(CPU|CUDA)' backend."
),
)
if check_validate:
_check_validate(op_info, sample)
return sample
def _maybe_failing_sample_inputs_sparse_like_fns(
op_info, device, dtype, requires_grad, layout, **kwargs
):
if torch.cuda.is_available() and layout is not torch.sparse_coo:
other_device = "cuda" if torch.device(device).type == "cpu" else "cpu"
if layout is torch.sparse_csr:
other_layout = torch.sparse_csc
elif layout is torch.sparse_csc:
other_layout = torch.sparse_csr
elif layout is torch.sparse_bsr:
other_layout = torch.sparse_bsc
elif layout is torch.sparse_bsc:
other_layout = torch.sparse_bsr
else:
other_layout = torch.strided
blocksize = (1, 1) if layout in {torch.sparse_bsr, torch.sparse_bsc} else None
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype, device=device).to_sparse(
layout=layout, blocksize=blocksize
),
kwargs=dict(device=other_device),
)
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype, device=device).to_sparse(
layout=layout, blocksize=blocksize
),
kwargs=dict(layout=other_layout),
)
def sample_inputs_sparse_like_fns(
op_info, device, dtype, requires_grad, layout, **kwargs
):
"""Sample inputs for like-functions on sparse tensors."""
yield from _sample_inputs_sparse(
_sample_inputs_sparse_like_fns,
_maybe_failing_sample_inputs_sparse_like_fns,
_validate_sample_input_sparse_like_fns,
op_info,
device,
dtype,
requires_grad,
layout,
**kwargs,
)
def error_inputs_sparse_like_fns(op_info, device, layout, **kwargs):
"""Error inputs for like-functions on sparse tensors."""
dtype = torch.float64
requires_grad = False
yield from _error_inputs_sparse(
_maybe_failing_sample_inputs_sparse_like_fns,
_validate_sample_input_sparse_like_fns,
op_info,
device,
dtype,
requires_grad,
layout,
**kwargs,
)
def _validate_sample_input_sparse_default(op_info, sample, check_validate=False):
if op_info.name == "to_sparse":
if (
sample.input.layout
in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}
and len(sample.args) == 1
and isinstance(sample.args[0], int)
and sample.args[0] != 2
):
sample = ErrorInput(
sample,
error_regex="sparse dim argument must be 2 for sparse_compressed_to_sparse",
)
if check_validate:
_check_validate(op_info, sample)
return sample
def validate_sample_input_sparse(op_info, sample, check_validate=False):
"""Return the specified sample when it is valid and supported by the
    operation. Otherwise, return the sample as an ErrorInput instance.
    When check_validate is True, the result is validated by calling the
    op on the sample.
"""
if isinstance(op_info, ReductionOpInfo):
return _validate_sample_input_sparse_reduction(
op_info, sample, check_validate=check_validate
)
elif isinstance(op_info, BinaryUfuncInfo):
return _validate_sample_input_sparse_elementwise_binary_operation(
op_info, sample, check_validate=check_validate
)
else:
return _validate_sample_input_sparse_default(
op_info, sample, check_validate=check_validate
)
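# Illustrative sketch (not part of the original file): how the dispatcher above might be used
# to filter generated samples before running an op. The surrounding harness is hypothetical;
# only the SampleInput/ErrorInput split mirrors validate_sample_input_sparse().
def _example_filter_valid_samples(op_info, samples):
    for sample in samples:
        checked = validate_sample_input_sparse(op_info, sample)
        if isinstance(checked, SampleInput):
            yield checked  # valid and supported by the op
        # otherwise `checked` is an ErrorInput describing the expected failure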
```
|
=============================================================================================================================================
SOURCE CODE FILE: special.py
LINES: 1
SIZE: 27.71 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\opinfo\definitions\special.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import unittest
from functools import partial
from itertools import product
import numpy as np
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_device_type import (
precisionOverride,
tol,
toleranceOverride,
)
from torch.testing._internal.common_dtype import all_types_and, floating_types
from torch.testing._internal.common_utils import TEST_SCIPY, torch_to_numpy_dtype_dict
from torch.testing._internal.opinfo.core import (
BinaryUfuncInfo,
DecorateInfo,
L,
NumericsFilter,
OpInfo,
S,
SampleInput,
UnaryUfuncInfo,
)
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ElementwiseUnaryPythonRefInfo,
)
from torch.testing._internal.opinfo.utils import (
np_unary_ufunc_integer_promotion_wrapper,
)
if TEST_SCIPY:
import scipy.special
# TODO: Consolidate `i0e` with sample_inputs_unary when `make_tensor`
# supports the `exclude` argument.
# For more context: https://github.com/pytorch/pytorch/pull/56352#discussion_r633277617
def sample_inputs_i0_i1(op_info, device, dtype, requires_grad, **kwargs):
exclude_zero = requires_grad and op_info.op == torch.special.i0e
make_arg = partial(
make_tensor,
dtype=dtype,
device=device,
requires_grad=requires_grad,
exclude_zero=exclude_zero,
)
yield SampleInput(make_arg((S,)))
yield SampleInput(make_arg(()))
if requires_grad and not exclude_zero:
# Special Case for gradient
# Sample with `0` in the input
t = make_arg((S,))
t[0] = 0
yield SampleInput(t)
def sample_inputs_polygamma(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(
make_tensor,
device=device,
# TODO: eliminate low after gh-106692 is fixed:
low=(1 if dtype in {torch.int32, torch.int64} else None),
dtype=dtype,
requires_grad=requires_grad,
)
tensor_shapes = ((S, S), ())
ns = (1, 2, 3, 4, 5)
for shape, n in product(tensor_shapes, ns):
yield SampleInput(make_arg(shape), args=(n,))
def reference_polygamma(x, n):
# WEIRD `scipy.special.polygamma` behavior
# >>> scipy.special.polygamma(0, np.array(501, dtype=np.float32)).dtype
# dtype('float64')
# >>> scipy.special.polygamma(0, np.array([501], dtype=np.float32)).dtype
# dtype('float32')
#
# Thus we cast output to the default torch dtype or preserve double
result_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]
if x.dtype == np.double:
result_dtype = np.double
return scipy.special.polygamma(n, x).astype(result_dtype)
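# Illustrative sketch (not part of the original file), guarded on scipy availability: the
# dtype quirk described above in action, which is why reference_polygamma() re-casts its result.
def _example_polygamma_dtype_quirk():
    if not TEST_SCIPY:
        return None
    scalar_dtype = scipy.special.polygamma(0, np.array(501, dtype=np.float32)).dtype  # float64
    vector_dtype = scipy.special.polygamma(0, np.array([501], dtype=np.float32)).dtype  # float32
    return scalar_dtype, vector_dtype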
def sample_inputs_entr(op_info, device, dtype, requires_grad, **kwargs):
low, _ = op_info.domain
if requires_grad:
low = 0 + op_info._domain_eps
make_arg = partial(
make_tensor, dtype=dtype, device=device, low=low, requires_grad=requires_grad
)
yield SampleInput(make_arg((L,)))
yield SampleInput(make_arg(()))
def sample_inputs_erfcx(op_info, device, dtype, requires_grad, **kwargs):
for shape in ((L,), (1, 0, 3), ()):
yield SampleInput(
make_tensor(
shape,
device=device,
dtype=dtype,
low=-5,
requires_grad=requires_grad,
),
)
op_db: list[OpInfo] = [
UnaryUfuncInfo(
"special.i0e",
aten_name="special_i0e",
ref=scipy.special.i0e if TEST_SCIPY else None,
decorators=(precisionOverride({torch.bfloat16: 3e-1, torch.float16: 3e-1}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_i0_i1,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
UnaryUfuncInfo(
"special.i1",
aten_name="special_i1",
ref=np_unary_ufunc_integer_promotion_wrapper(scipy.special.i1)
if TEST_SCIPY
else None,
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
backward_dtypes=floating_types(),
sample_inputs_func=sample_inputs_i0_i1,
decorators=(
DecorateInfo(
toleranceOverride(
{
torch.float32: tol(atol=1e-4, rtol=0),
torch.bool: tol(atol=1e-4, rtol=0),
}
)
),
),
skips=(
DecorateInfo(
unittest.skip("Incorrect result!"),
"TestUnaryUfuncs",
"test_reference_numerics_large",
dtypes=(torch.int8,),
),
),
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
),
UnaryUfuncInfo(
"special.i1e",
aten_name="special_i1e",
ref=scipy.special.i1e if TEST_SCIPY else None,
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
backward_dtypes=floating_types(),
sample_inputs_func=sample_inputs_i0_i1,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
UnaryUfuncInfo(
"special.ndtr",
aten_name="special_ndtr",
decorators=(precisionOverride({torch.bfloat16: 5e-3, torch.float16: 5e-4}),),
ref=scipy.special.ndtr if TEST_SCIPY else None,
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Dispatch stub: unsupported device typemeta
DecorateInfo(
unittest.expectedFailure,
"TestFwdGradients",
"test_fn_fwgrad_bwgrad",
device_type="meta",
),
),
),
# A separate OpInfo entry for special.polygamma is needed to reorder the arguments
# for the alias. See the discussion here: https://github.com/pytorch/pytorch/pull/59691#discussion_r650261939
UnaryUfuncInfo(
"special.polygamma",
op=lambda x, n, **kwargs: torch.special.polygamma(n, x, **kwargs),
variant_test_name="special_polygamma_n_0",
ref=reference_polygamma if TEST_SCIPY else None,
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# lambda impl
DecorateInfo(
unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
),
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
),
sample_kwargs=lambda device, dtype, input: ({"n": 0}, {"n": 0}),
        # polygamma functions have multiple singularities at non-positive integer values of x
reference_numerics_filter=NumericsFilter(
condition=lambda x: (x < 0.1) & ((x - x.round()).abs() < 1e-4), safe_val=1
),
),
BinaryUfuncInfo(
"special.xlog1py",
aten_name="special_xlog1py",
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
promotes_int_to_float=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_one_python_scalar=True,
# We don't test -1 as the gradient will be NaN and it'll break
rhs_make_tensor_kwargs=dict(low=-0.99),
),
BinaryUfuncInfo(
"special.zeta",
aten_name="special_zeta",
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
supports_autograd=False,
supports_one_python_scalar=True,
skips=(
            # Reference inputs produce NaNs and infs on CUDA, and NaN, inf, 0., -inf on CPU
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"),
),
),
# TODO: FIXME
# OpInfo entry to verify the gradient formula of `other`/`q`
# BinaryUfuncInfo('special.zeta',
# op=lambda q, x, **kwargs: torch.special.zeta(x, q, **kwargs),
# aten_name='special_zeta',
# variant_test_name='grad',
# dtypes=all_types_and(torch.bool),
# promotes_int_to_float=True,
# supports_autograd=True,
# supports_rhs_python_scalar=False,
# decorators=[
# # Derivative wrt first tensor not implemented
# DecorateInfo(unittest.expectedFailure, "TestCommon",
# "test_floating_inputs_are_differentiable")
# ],
# skips=(
# # Lambda doesn't work in JIT test
# # AssertionError: JIT Test does not execute any logic
# DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"),
# )),
UnaryUfuncInfo(
"special.entr",
ref=scipy.special.entr if TEST_SCIPY else None,
aten_name="special_entr",
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=(precisionOverride({torch.float16: 1e-1, torch.bfloat16: 1e-1}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestUnaryUfuncs",
"test_reference_numerics_large",
dtypes=[torch.bfloat16, torch.float16],
),
),
supports_inplace_autograd=False,
sample_inputs_func=sample_inputs_entr,
),
UnaryUfuncInfo(
"special.ndtri",
ref=scipy.special.ndtri if TEST_SCIPY else None,
domain=(0, 1),
aten_name="special_ndtri",
dtypes=all_types_and(torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
UnaryUfuncInfo(
"special.log_ndtr",
aten_name="special_log_ndtr",
ref=scipy.special.log_ndtr if TEST_SCIPY else None,
dtypes=all_types_and(torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
UnaryUfuncInfo(
"special.erfcx",
ref=scipy.special.erfcx if TEST_SCIPY else None,
aten_name="special_erfcx",
decorators=(
toleranceOverride(
{
torch.float32: tol(atol=0, rtol=4e-6),
}
),
),
dtypes=all_types_and(torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_erfcx,
),
UnaryUfuncInfo(
"special.airy_ai",
decorators=(
precisionOverride(
{
torch.float32: 1e-03,
torch.float64: 1e-05,
},
),
),
dtypes=all_types_and(torch.bool),
ref=lambda x: scipy.special.airy(x)[0] if TEST_SCIPY else None,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestUnaryUfuncs",
"test_reference_numerics_large",
),
),
supports_autograd=False,
),
UnaryUfuncInfo(
"special.bessel_j0",
decorators=(
precisionOverride(
{
torch.float32: 1e-04,
torch.float64: 1e-05,
},
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.j0 if TEST_SCIPY else None,
supports_autograd=False,
),
UnaryUfuncInfo(
"special.bessel_j1",
decorators=(
precisionOverride(
{
torch.float32: 1e-04,
torch.float64: 1e-05,
},
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.j1 if TEST_SCIPY else None,
supports_autograd=False,
),
UnaryUfuncInfo(
"special.bessel_y0",
decorators=(
precisionOverride(
{
torch.float32: 1e-04,
torch.float64: 1e-05,
},
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.y0 if TEST_SCIPY else None,
supports_autograd=False,
),
UnaryUfuncInfo(
"special.bessel_y1",
decorators=(
precisionOverride(
{
torch.float32: 1e-04,
torch.float64: 1e-05,
},
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.y1 if TEST_SCIPY else None,
supports_autograd=False,
),
BinaryUfuncInfo(
"special.chebyshev_polynomial_t",
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"),
DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"),
DecorateInfo(
unittest.skip("testing takes an unreasonably long time, #79528"),
"TestCommon",
"test_compare_cpu",
),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
"special.chebyshev_polynomial_u",
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"),
DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"),
DecorateInfo(
unittest.skip("testing takes an unreasonably long time, #79528"),
"TestCommon",
"test_compare_cpu",
),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
"special.chebyshev_polynomial_v",
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(
unittest.skip(
"Skipping - testing takes an unreasonably long time, #79528"
)
),
DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"),
DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
"special.chebyshev_polynomial_w",
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(
unittest.skip(
"Skipping - testing takes an unreasonably long time, #79528"
)
),
DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"),
DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
"special.hermite_polynomial_h",
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"),
DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"),
# Greatest absolute difference: inf
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
"special.hermite_polynomial_he",
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"),
DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"),
DecorateInfo(
unittest.skip("testing takes an unreasonably long time, #79528"),
"TestCommon",
"test_compare_cpu",
),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
"special.laguerre_polynomial_l",
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"),
DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"),
DecorateInfo(
unittest.skip("testing takes an unreasonably long time, #79528"),
"TestCommon",
"test_compare_cpu",
),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
"special.legendre_polynomial_p",
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(
unittest.skip(
"Skipping - testing takes an unreasonably long time, #79528"
)
),
DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"),
DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"),
DecorateInfo(
unittest.skip("testing takes an unreasonably long time, #79528"),
"TestCommon",
"test_compare_cpu",
),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
UnaryUfuncInfo(
"special.modified_bessel_i0",
decorators=(
precisionOverride(
{
torch.float32: 1e-03,
torch.float64: 1e-05,
},
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.i0 if TEST_SCIPY else None,
supports_autograd=False,
),
UnaryUfuncInfo(
"special.modified_bessel_i1",
decorators=(
precisionOverride(
{
torch.float32: 1e-03,
torch.float64: 1e-05,
},
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.i1 if TEST_SCIPY else None,
supports_autograd=False,
),
UnaryUfuncInfo(
"special.modified_bessel_k0",
decorators=(
precisionOverride(
{
torch.float32: 1e-03,
torch.float64: 1e-05,
},
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.k0 if TEST_SCIPY else None,
supports_autograd=False,
),
UnaryUfuncInfo(
"special.modified_bessel_k1",
decorators=(
precisionOverride(
{
torch.float32: 1e-03,
torch.float64: 1e-05,
},
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.k1 if TEST_SCIPY else None,
supports_autograd=False,
),
UnaryUfuncInfo(
"special.scaled_modified_bessel_k0",
decorators=(
toleranceOverride(
{
torch.float32: tol(atol=1e-03, rtol=1e-03),
torch.float64: tol(atol=1e-05, rtol=1e-03),
}
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.k0e if TEST_SCIPY else None,
supports_autograd=False,
),
UnaryUfuncInfo(
"special.scaled_modified_bessel_k1",
decorators=(
toleranceOverride(
{
torch.float32: tol(atol=1e-03, rtol=1e-03),
torch.float64: tol(atol=1e-05, rtol=1e-03),
}
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.k1e if TEST_SCIPY else None,
supports_autograd=False,
),
BinaryUfuncInfo(
"special.shifted_chebyshev_polynomial_t",
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(
unittest.skip(
"Skipping - testing takes an unreasonably long time, #79528"
)
),
DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"),
DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"),
DecorateInfo(
unittest.skip("testing takes an unreasonably long time, #79528"),
"TestCommon",
"test_compare_cpu",
),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
"special.shifted_chebyshev_polynomial_u",
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(
unittest.skip(
"Skipping - testing takes an unreasonably long time, #79528"
)
),
DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"),
DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"),
DecorateInfo(
unittest.skip("testing takes an unreasonably long time, #79528"),
"TestCommon",
"test_compare_cpu",
),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
"special.shifted_chebyshev_polynomial_v",
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(
unittest.skip(
"Skipping - testing takes an unreasonably long time, #79528"
)
),
DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"),
DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"),
DecorateInfo(
unittest.skip("testing takes an unreasonably long time, #79528"),
"TestCommon",
"test_compare_cpu",
),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
"special.shifted_chebyshev_polynomial_w",
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(
unittest.skip(
"Skipping - testing takes an unreasonably long time, #79528"
)
),
DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"),
DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"),
DecorateInfo(
unittest.skip("testing takes an unreasonably long time, #79528"),
"TestCommon",
"test_compare_cpu",
),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
UnaryUfuncInfo(
"special.spherical_bessel_j0",
decorators=(
toleranceOverride(
{
torch.float32: tol(atol=1e-03, rtol=1e-03),
torch.float64: tol(atol=1e-05, rtol=1e-03),
}
),
),
dtypes=all_types_and(torch.bool),
        ref=(lambda x: scipy.special.spherical_jn(0, x)) if TEST_SCIPY else None,
supports_autograd=False,
),
]
python_ref_db: list[OpInfo] = [
#
# Elementwise Unary Special OpInfos
#
ElementwiseUnaryPythonRefInfo(
"_refs.special.bessel_j0",
torch_opinfo_name="special.bessel_j0",
op_db=op_db,
decorators=(
precisionOverride(
{
torch.float32: 1e-04,
torch.float64: 1e-05,
},
),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.special.bessel_j1",
torch_opinfo_name="special.bessel_j1",
op_db=op_db,
decorators=(
precisionOverride(
{
torch.float32: 1e-04,
torch.float64: 1e-05,
},
),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.special.entr",
torch_opinfo_name="special.entr",
op_db=op_db,
decorators=(precisionOverride({torch.float16: 1e-1, torch.bfloat16: 1e-1}),),
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestUnaryUfuncs",
"test_reference_numerics_large",
dtypes=[torch.bfloat16, torch.float16],
),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.special.erfcx",
torch_opinfo_name="special.erfcx",
op_db=op_db,
decorators=(
toleranceOverride(
{
torch.float32: tol(atol=0, rtol=4e-6),
}
),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.special.i0e",
torch_opinfo_name="special.i0e",
op_db=op_db,
decorators=(precisionOverride({torch.bfloat16: 3e-1, torch.float16: 3e-1}),),
),
ElementwiseUnaryPythonRefInfo(
"_refs.special.i1",
torch_opinfo_name="special.i1",
op_db=op_db,
decorators=(
DecorateInfo(
toleranceOverride(
{
torch.float32: tol(atol=1e-4, rtol=0),
torch.bool: tol(atol=1e-4, rtol=0),
}
)
),
),
skips=(
DecorateInfo(
unittest.skip("Incorrect result!"),
"TestUnaryUfuncs",
"test_reference_numerics_large",
dtypes=(torch.int8,),
),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.special.i1e",
torch_opinfo_name="special.i1e",
op_db=op_db,
),
ElementwiseUnaryPythonRefInfo(
"_refs.special.log_ndtr",
torch_opinfo_name="special.log_ndtr",
op_db=op_db,
),
ElementwiseUnaryPythonRefInfo(
"_refs.special.ndtr",
torch_opinfo_name="special.ndtr",
op_db=op_db,
),
ElementwiseUnaryPythonRefInfo(
"_refs.special.ndtri",
torch_opinfo_name="special.ndtri",
op_db=op_db,
),
ElementwiseUnaryPythonRefInfo(
"_refs.special.spherical_bessel_j0",
torch_opinfo_name="special.spherical_bessel_j0",
op_db=op_db,
decorators=(
toleranceOverride(
{
torch.float32: tol(atol=1e-03, rtol=1e-03),
torch.float64: tol(atol=1e-05, rtol=1e-03),
}
),
),
),
#
# Elementwise Binary Special OpInfos
#
ElementwiseBinaryPythonRefInfo(
"_refs.special.zeta",
torch_opinfo_name="special.zeta",
supports_one_python_scalar=True,
op_db=op_db,
skips=(
            # The reference produces nans and infs for some reference_inputs on CUDA,
            # and nan, inf, 0., -inf on CPU
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"),
),
),
]
```
|
==============================================================================================================================
SOURCE CODE FILE: refs.py
LINES: 1
SIZE: 8.05 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\opinfo\refs.py
ENCODING: utf-8
```py
# mypy: ignore-errors
from torch.testing._internal.opinfo.core import (
BinaryUfuncInfo,
OpInfo,
ReductionOpInfo,
UnaryUfuncInfo,
)
# NOTE [Python References]
# Python References emulate existing PyTorch operations, but can ultimately
# be expressed in terms of "primitive" operations from torch._prims.
#
# These references are experimental.
# See https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-0/577
# for additional context.
#
# Python Reference OpInfos should be added to the python_ref_db list below.
# Tests can opt-into running on these references by including
# that list in the Sequence they pass to the @ops decorator.
#
# When a Python Reference OpInfo is constructed, a pointer to an
# existing OpInfo must be provided using the torch_opinfo_name kwarg.
# The existing OpInfo with that name and no variant name is looked up
# and inherited from.
#
# Instead of just inheriting the existing OpInfo's metadata, the
# Python Reference OpInfos inherit the existing OpInfo's
# construction arguments. These arguments can be overridden
# by adding kwargs to the constructor.
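# Hedged example (editorial addition, not part of the original file): a typical
# Python Reference OpInfo declaration, mirroring entries that already exist in
# the special-functions op database:
#
#     ElementwiseUnaryPythonRefInfo(
#         "_refs.special.i0e",
#         torch_opinfo_name="special.i0e",
#         op_db=op_db,
#     )
#
# The constructors below look the parent OpInfo up by name/variant and reuse
# its recorded construction arguments; any extra kwargs override the inherited
# ones.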
def _find_referenced_opinfo(referenced_name, variant_name, *, op_db=None):
"""
    Finds the OpInfo with the given name and variant name.
"""
    # NOTE: searching the global op_db doesn't work when OpInfos are split into
    # different modules, because the global op_db may not be fully constructed
    # yet. Instead, the local op_db must be passed in explicitly.
if op_db is None:
from torch.testing._internal.common_methods_invocations import op_db
for opinfo in op_db:
if opinfo.name == referenced_name and opinfo.variant_test_name == variant_name:
return opinfo
def _inherit_constructor_args(name, op, inherited, overrides):
# inherits metadata
common_kwargs = {
"name": name,
"op": op,
"aliases": None, # TODO add a check for alias coverage
"method_variant": None,
"inplace_variant": None, # TODO: add a check for inplace coverage
"supports_scripting": False,
}
# Acquires inherited kwargs
kwargs = inherited.copy()
# Fixes metadata
if "kwargs" in kwargs:
kwargs.update(kwargs["kwargs"])
del kwargs["kwargs"]
if "self" in kwargs:
del kwargs["self"]
if "__class__" in kwargs:
del kwargs["__class__"]
if "skips" in kwargs:
del kwargs["skips"]
if "decorators" in kwargs:
del kwargs["decorators"]
# Overrides metadata
kwargs.update(common_kwargs)
kwargs.update(overrides)
# At the moment no prims support autograd, so we must not run autograd
# tests e.g. when testing dtype support. Once we start writing autograd
# formulas for prims this can be removed.
kwargs["supports_autograd"] = False
kwargs["supports_gradgrad"] = False
kwargs["supports_fwgrad_bwgrad"] = False
kwargs["supports_inplace_autograd"] = False
kwargs["supports_forward_ad"] = False
return kwargs
class PythonRefInfo(OpInfo):
"""
An OpInfo for a Python reference of an OpInfo base class operation.
"""
def __init__(
self,
        name,  # the string name of the callable Python reference
*,
op=None, # the function variant of the operation, populated as torch.<name> if None
op_db=None, # The database of opinfos to search for the parent opinfo
torch_opinfo_name, # the string name of the corresponding torch opinfo
torch_opinfo_variant_name="", # the variant name for corresponding torch opinfo
validate_view_consistency=True,
**kwargs,
): # additional kwargs override kwargs inherited from the torch opinfo
self.torch_opinfo_name = torch_opinfo_name
self.torch_opinfo_variant_name = torch_opinfo_variant_name
self.torch_opinfo = _find_referenced_opinfo(
torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db
)
self.validate_view_consistency = validate_view_consistency
assert isinstance(self.torch_opinfo, OpInfo)
inherited = self.torch_opinfo._original_opinfo_args
ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
super().__init__(**ukwargs)
class ReductionPythonRefInfo(ReductionOpInfo):
"""
    An OpInfo for a Python reference of a reduction operation.
"""
def __init__(
self,
        name,  # the string name of the callable Python reference
*,
op=None, # the function variant of the operation, populated as torch.<name> if None
op_db=None, # The database of opinfos to search for the parent opinfo
torch_opinfo_name, # the string name of the corresponding torch opinfo
torch_opinfo_variant_name="", # the variant name for corresponding torch opinfo
**kwargs,
): # additional kwargs override kwargs inherited from the torch opinfo
self.torch_opinfo_name = torch_opinfo_name
self.torch_opinfo_variant_name = torch_opinfo_variant_name
self.torch_opinfo = _find_referenced_opinfo(
torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db
)
assert isinstance(self.torch_opinfo, ReductionOpInfo)
inherited = self.torch_opinfo._original_reduction_args
ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
# See https://github.com/pytorch/pytorch/issues/77216
self.validate_view_consistency = False
super().__init__(**ukwargs)
class ElementwiseUnaryPythonRefInfo(UnaryUfuncInfo):
"""
An OpInfo for a Python reference of an elementwise unary operation.
"""
def __init__(
self,
        name,  # the string name of the callable Python reference
*,
op=None, # the function variant of the operation, populated as torch.<name> if None
op_db=None, # The database of opinfos to search for the parent opinfo
torch_opinfo_name, # the string name of the corresponding torch opinfo
torch_opinfo_variant_name="", # the variant name for corresponding torch opinfo
validate_view_consistency=True,
**kwargs,
): # additional kwargs override kwargs inherited from the torch opinfo
self.torch_opinfo_name = torch_opinfo_name
self.torch_opinfo_variant_name = torch_opinfo_variant_name
self.torch_opinfo = _find_referenced_opinfo(
torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db
)
self.validate_view_consistency = validate_view_consistency
assert isinstance(self.torch_opinfo, UnaryUfuncInfo)
inherited = self.torch_opinfo._original_unary_ufunc_args
ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
super().__init__(**ukwargs)
class ElementwiseBinaryPythonRefInfo(BinaryUfuncInfo):
"""
An OpInfo for a Python reference of an elementwise binary operation.
"""
def __init__(
self,
        name,  # the string name of the callable Python reference
*,
op=None, # the function variant of the operation, populated as torch.<name> if None
op_db=None, # The database of opinfos to search for the parent opinfo
torch_opinfo_name, # the string name of the corresponding torch opinfo
torch_opinfo_variant_name="", # the variant name for corresponding torch opinfo
**kwargs,
): # additional kwargs override kwargs inherited from the torch opinfo
self.torch_opinfo_name = torch_opinfo_name
self.torch_opinfo_variant_name = torch_opinfo_variant_name
self.torch_opinfo = _find_referenced_opinfo(
torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db
)
assert isinstance(self.torch_opinfo, BinaryUfuncInfo)
inherited = self.torch_opinfo._original_binary_ufunc_args
ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
super().__init__(**ukwargs)
```
|
===============================================================================================================================
SOURCE CODE FILE: utils.py
LINES: 1
SIZE: 8.79 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\opinfo\utils.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import collections
import warnings
from collections.abc import Sequence
from functools import partial, wraps
import numpy as np
import numpy.typing as npt
import torch
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_dtype import (
_dispatch_dtypes,
all_types,
all_types_and,
all_types_and_complex,
all_types_and_complex_and,
all_types_and_half,
complex_types,
floating_and_complex_types,
floating_and_complex_types_and,
floating_types,
floating_types_and,
floating_types_and_half,
integral_types,
integral_types_and,
)
from torch.testing._internal.common_utils import torch_to_numpy_dtype_dict
COMPLETE_DTYPES_DISPATCH = (
all_types,
all_types_and_complex,
all_types_and_half,
floating_types,
floating_and_complex_types,
floating_types_and_half,
integral_types,
complex_types,
)
EXTENSIBLE_DTYPE_DISPATCH = (
all_types_and_complex_and,
floating_types_and,
floating_and_complex_types_and,
integral_types_and,
all_types_and,
)
# Better way to acquire devices?
DEVICES = ["cpu"] + (["cuda"] if TEST_CUDA else [])
class _dynamic_dispatch_dtypes(_dispatch_dtypes):
# Class to tag the dynamically generated types.
pass
def get_supported_dtypes(op, sample_inputs_fn, device_type):
# Returns the supported dtypes for the given operator and device_type pair.
assert device_type in ["cpu", "cuda"]
if not TEST_CUDA and device_type == "cuda":
warnings.warn(
"WARNING: CUDA is not available, empty_dtypes dispatch will be returned!"
)
return _dynamic_dispatch_dtypes(())
supported_dtypes = set()
for dtype in all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half):
try:
samples = sample_inputs_fn(op, device_type, dtype, False)
except RuntimeError:
# If `sample_inputs_fn` doesn't support sampling for a given
# `dtype`, we assume that the `dtype` is not supported.
            # We raise a warning so that the user knows that this was the case
# and can investigate if there was an issue with the `sample_inputs_fn`.
warnings.warn(
f"WARNING: Unable to generate sample for device:{device_type} and dtype:{dtype}"
)
continue
# We assume the dtype is supported
# only if all samples pass for the given dtype.
supported = True
for sample in samples:
try:
op(sample.input, *sample.args, **sample.kwargs)
except RuntimeError:
# dtype is not supported
supported = False
break
if supported:
supported_dtypes.add(dtype)
return _dynamic_dispatch_dtypes(supported_dtypes)
def dtypes_dispatch_hint(dtypes):
# Function returns the appropriate dispatch function (from COMPLETE_DTYPES_DISPATCH and EXTENSIBLE_DTYPE_DISPATCH)
# and its string representation for the passed `dtypes`.
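    # Hedged illustration (editorial addition): passing exactly the dtypes from
    # floating_types() returns that dispatch function directly, whereas a strict
    # superset such as the floating types plus torch.bfloat16 falls through to
    # an EXTENSIBLE dispatch, roughly partial(floating_types_and, torch.bfloat16).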
return_type = collections.namedtuple("return_type", "dispatch_fn dispatch_fn_str")
    # If CUDA is not available, dtypes will be empty.
if len(dtypes) == 0:
return return_type((), "()")
set_dtypes = set(dtypes)
for dispatch in COMPLETE_DTYPES_DISPATCH:
# Short circuit if we get an exact match.
if set(dispatch()) == set_dtypes:
return return_type(dispatch, dispatch.__name__ + "()")
chosen_dispatch = None
chosen_dispatch_score = 0.0
for dispatch in EXTENSIBLE_DTYPE_DISPATCH:
dispatch_dtypes = set(dispatch())
if not dispatch_dtypes.issubset(set_dtypes):
continue
score = len(dispatch_dtypes)
if score > chosen_dispatch_score:
chosen_dispatch_score = score
chosen_dispatch = dispatch
    # If the user passed dtypes that don't cover even the smallest
    # extensible dispatch set (unlikely, but possible in this code path).
if chosen_dispatch is None:
return return_type((), str(dtypes))
return return_type(
partial(dispatch, *tuple(set(dtypes) - set(dispatch()))),
dispatch.__name__ + str(tuple(set(dtypes) - set(dispatch()))),
)
def is_dynamic_dtype_set(op):
# Detect if the OpInfo entry acquired dtypes dynamically
# using `get_supported_dtypes`.
return op.dynamic_dtypes
def str_format_dynamic_dtype(op):
fmt_str = f"""
OpInfo({op.name},
dtypes={dtypes_dispatch_hint(op.dtypes).dispatch_fn_str},
dtypesIfCUDA={dtypes_dispatch_hint(op.dtypesIfCUDA).dispatch_fn_str},
)
"""
return fmt_str
def np_unary_ufunc_integer_promotion_wrapper(fn):
# Wrapper that passes PyTorch's default scalar
# type as an argument to the wrapped NumPy
# unary ufunc when given an integer input.
    # This mimics PyTorch's integer->floating point
# type promotion.
#
# This is necessary when NumPy promotes
# integer types to double, since PyTorch promotes
# integer types to the default scalar type.
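    # Hedged usage sketch (editorial addition): wrapping a NumPy ufunc so that
    # integer inputs are first cast to the NumPy analogue of
    # torch.get_default_dtype() (typically float32):
    #
    #     np_sin_ref = np_unary_ufunc_integer_promotion_wrapper(np.sin)
    #     np_sin_ref(np.arange(3))  # computed in float32 rather than float64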
# Helper to determine if promotion is needed
def is_integral(dtype):
return dtype in [
np.bool_,
bool,
np.uint8,
np.int8,
np.int16,
np.int32,
np.int64,
]
@wraps(fn)
def wrapped_fn(x):
# As the default dtype can change, acquire it when function is called.
# NOTE: Promotion in PyTorch is from integer types to the default dtype
np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]
if is_integral(x.dtype):
return fn(x.astype(np_dtype))
return fn(x)
return wrapped_fn
def reference_reduction_numpy(f, supports_keepdims=True):
"""Wraps a NumPy reduction operator.
The wrapper function will forward dim, keepdim, mask, and identity
kwargs to the wrapped function as the NumPy equivalent axis,
    keepdims, where, and initial kwargs, respectively.
Args:
f: NumPy reduction operator to wrap
supports_keepdims (bool, optional): Whether the NumPy operator accepts
keepdims parameter. If it does not, the wrapper will manually unsqueeze
the reduced dimensions if it was called with keepdim=True. Defaults to True.
Returns:
Wrapped function
"""
@wraps(f)
def wrapper(x: npt.NDArray, *args, **kwargs):
# Copy keys into a set
keys = set(kwargs.keys())
dim = kwargs.pop("dim", None)
keepdim = kwargs.pop("keepdim", False)
if "dim" in keys:
dim = tuple(dim) if isinstance(dim, Sequence) else dim
# NumPy reductions don't accept dim=0 for scalar inputs
# so we convert it to None if and only if dim is equivalent
if x.ndim == 0 and dim in {0, -1, (0,), (-1,)}:
kwargs["axis"] = None
else:
kwargs["axis"] = dim
if "keepdim" in keys and supports_keepdims:
kwargs["keepdims"] = keepdim
if "mask" in keys:
mask = kwargs.pop("mask")
if mask is not None:
assert mask.layout == torch.strided
kwargs["where"] = mask.cpu().numpy()
if "identity" in keys:
identity = kwargs.pop("identity")
if identity is not None:
if identity.dtype is torch.bfloat16:
identity = identity.cpu().to(torch.float32)
else:
identity = identity.cpu()
kwargs["initial"] = identity.numpy()
result = f(x, *args, **kwargs)
# Unsqueeze reduced dimensions if NumPy does not support keepdims
if keepdim and not supports_keepdims and x.ndim > 0:
dim = list(range(x.ndim)) if dim is None else dim
result = np.expand_dims(result, dim)
return result
return wrapper
def prod_numpy(a, *args, **kwargs):
"""
    The function casts the input to np.int64 if it is a signed integer, or to
    np.uint64 if it is unsigned, before calling np.prod. This is necessary
    because np.prod defaults to int32 on Windows while it uses int64 on Linux.
This is for fixing integer overflow https://github.com/pytorch/pytorch/issues/77320
Returns:
np.prod of input
"""
if "dtype" not in kwargs:
if np.issubdtype(a.dtype, np.signedinteger):
a = a.astype(np.int64)
elif np.issubdtype(a.dtype, np.unsignedinteger):
a = a.astype(np.uint64)
fn = reference_reduction_numpy(np.prod)
return fn(a, *args, **kwargs)
```
|
===================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.37 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\optests\__init__.py
ENCODING: utf-8
```py
# mypy: ignore-errors
from .make_fx import make_fx_check
from .aot_autograd import aot_autograd_check, _test_aot_autograd_forwards_backwards_helper
from .fake_tensor import fake_check
from .autograd_registration import autograd_registration_check
from .generate_tests import generate_opcheck_tests, opcheck, OpCheckError, dontGenerateOpCheckTests, is_inside_opcheck_mode
```
|
=======================================================================================================================================
SOURCE CODE FILE: aot_autograd.py
LINES: 1
SIZE: 6.43 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\optests\aot_autograd.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import torch
import torch.utils._pytree as pytree
from torch.testing._utils import wrapper_set_seed
from functorch.compile import compiled_function, min_cut_rematerialization_partition, nop
from .make_fx import randomize
import re
class assert_raises_regex:
def __init__(self, exception_cls, regex):
self.exception_cls = exception_cls
self.regex = regex
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, traceback):
if exc_type == self.exception_cls:
msg = str(exc_val)
if not re.search(self.regex, msg):
raise AssertionError(
f"Expected exception to match regex. regex: {self.regex}, exception: {msg}")
return True # Squashes the exception
if exc_type is not None:
raise AssertionError(
f"Expected {self.exception_cls} to be raised, instead got exception {exc_type}")
raise AssertionError("Expected exception to be raised but none was")
def aot_autograd_check(
func,
args,
kwargs,
dynamic,
assert_raises_regex_fn=assert_raises_regex,
assert_equals_fn=torch.testing.assert_close,
check_gradients=True,
try_check_data_specialization=False,
skip_correctness_check=False):
"""Compares func(*args, **kwargs) in eager-mode to under AOTAutograd.
Compares outputs and (if check_gradients=True) gradients produced by
AOTAutograd against eager-mode PyTorch.
We assume that func(*args, **kwargs) succeeds in eager-mode PyTorch.
"""
flat_args, args_spec = pytree.tree_flatten((args, kwargs))
args = [arg for arg in flat_args if isinstance(arg, torch.Tensor)]
# We construct a new function that only accepts Tensors as inputs
def func_no_tensors(args):
reconstructed_flat_args = []
args = iter(args)
for v in flat_args:
if isinstance(v, torch.Tensor):
reconstructed_flat_args.append(next(args))
else:
reconstructed_flat_args.append(v)
c_args, c_kwargs = pytree.tree_unflatten(reconstructed_flat_args, args_spec)
return func(*c_args, **c_kwargs)
compiled_f = compiled_function(
func_no_tensors, nop, nop, dynamic=dynamic, partition_fn=min_cut_rematerialization_partition)
out = wrapper_set_seed(func_no_tensors, args)
if check_gradients == "auto":
any_tensor_requires_grad = pytree.tree_any_only(torch.Tensor, lambda x: x.requires_grad, args)
any_output_requires_grad = pytree.tree_any_only(torch.Tensor, lambda x: x.requires_grad, out)
check_gradients = any_tensor_requires_grad and any_output_requires_grad
if not check_gradients:
compiled_out = wrapper_set_seed(compiled_f, args)
if not skip_correctness_check:
assert_equals_fn(compiled_out, out, msg=outputs_msg)
return
_test_aot_autograd_forwards_backwards_helper(
func_no_tensors, compiled_f, args, assert_raises_regex_fn, assert_equals_fn,
try_check_data_specialization, skip_correctness_check)
outputs_msg = (
"Outputs of the operator are different in eager-mode PyTorch vs "
"AOTDispatcher tracing. This means the operator will have incorrect output "
"underneath torch.compile. This could be because the operator's "
"implementation not traceable."
)
def _test_aot_autograd_forwards_backwards_helper(
f, compiled_f, args, assert_raises_regex_fn, assert_equals_fn,
try_check_data_specialization, skip_correctness_check=False):
# Verify grads are equal between compiled and non-compiled versions of f.
def call_forwards_backwards(f, args):
flat_args = pytree.arg_tree_leaves(*args)
diff_args = [arg for arg in flat_args if isinstance(arg, torch.Tensor) and
arg.requires_grad]
out = wrapper_set_seed(f, args)
flat_out = pytree.tree_leaves(out)
sm = 0
for i in flat_out:
if isinstance(i, torch.Tensor):
# We need to call .abs() because it is possible that the output of the
                # operator is a complex Tensor, and autograd.grad will raise an error
                # on a complex Tensor unless we manually provide the grad_outputs argument.
sm += i.sum().abs()
assert isinstance(sm, torch.Tensor)
return out, torch.autograd.grad(sm, diff_args, allow_unused=True)
def check(args, ignore_failure=False):
try:
orig_out, orig_grad = call_forwards_backwards(f, args)
except Exception:
if ignore_failure:
return
raise
# See https://github.com/pytorch/pytorch/pull/98960#issuecomment-1505962215
tensor_args = [x for x in pytree.tree_flatten(args)[0] if isinstance(x, torch.Tensor)]
any_non_leaves = any(x.grad_fn is not None for x in tensor_args)
if all(x is None for x in orig_grad) and any_non_leaves:
with assert_raises_regex_fn(RuntimeError, 'does not require grad and does not have a grad_fn'):
call_forwards_backwards(compiled_f, args)
return
msg = (
"Gradients of the operator are different in eager-mode PyTorch vs "
"AOTDispatcher. This means the operator will have incorrect gradients "
"underneath torch.compile. This could be because the operator's "
"backward is incorrectly registered or not traceable."
)
compiled_out, compiled_grad = call_forwards_backwards(compiled_f, args)
if not skip_correctness_check:
try:
assert_equals_fn(compiled_out, orig_out)
except Exception as e:
raise type(e)(outputs_msg) from e
try:
assert_equals_fn(compiled_grad, orig_grad)
except Exception as e:
raise type(e)(msg) from e
check(args, ignore_failure=False)
# Randomize the data and run the traced graph with it, to catch bugs
# where we may have baked in Tensor data into the trace.
# This is not guaranteed to succeed, because `f` might have preconditions
# on the values of the inputs, so we just ignore if this test fails.
if try_check_data_specialization:
args = randomize(args)
check(args, ignore_failure=True)
```
|
================================================================================================================================================
SOURCE CODE FILE: autograd_registration.py
LINES: 1
SIZE: 5.69 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\optests\autograd_registration.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import contextlib
import torch
import torch.utils._pytree as pytree
@contextlib.contextmanager
def set_autograd_fallback_mode(mode):
prev = torch._C._get_autograd_fallback_mode()
try:
torch._C._set_autograd_fallback_mode(mode)
yield
finally:
torch._C._set_autograd_fallback_mode(prev)
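# Hedged usage sketch (editorial addition): autograd_registration_check below
# uses this context manager to disable the default autograd fallback's special
# behavior while running the operator, e.g.
#
#     with set_autograd_fallback_mode("nothing"):
#         out = op(*args, **kwargs)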
def autograd_registration_check(op, args, kwargs):
"""Check if autograd was registered correctly (for the operator).
Operators should have "autograd support" registered directly to an
autograd dispatch key.
    An incorrect registration may lead to silently incorrect results.
Note that this check won't catch all problems but will catch
the most common ones.
Example usage:
>>> x = torch.randn(3, requires_grad=True)
>>> autograd_registration_check(torch.ops.aten.sin.default, (x,), {})
Here are some best practices if you do find your autograd is
registered incorrectly:
- If the operator is composite (i.e. consists of other PyTorch ops)
and you wish the operator to decompose and get autograd support
that way, then please register the implementation to
DispatchKey::CompositeImplicitAutograd
- If you're adding an autograd formula for the operator, the correct
thing to do is to register an autograd.Function to
DispatchKey::Autograd (preferred) or one of the
DispatchKey::Autograd<BACKEND> keys. It is NOT OK to register
an autograd.Function to a backend (e.g. CPU/CUDA) key.
- If your operator is non-differentiable, then you should register
an implementation to the Autograd key that uses
AutoDispatchBelowAutograd and re-invokes the operator.
"""
assert isinstance(op, torch._ops.OpOverload)
# Implementation details
# -----------------------------------------------
# If an operator doesn't have an autograd kernel at an autograd key,
# and the operator does not return inputs as-is, then all of
# the outputs should have requires_grad=False before we apply
# special behaviors of our default autograd fallback.
# (The default autograd fallback may set requires_grad=True on output
# tensors in certain modes so that when they are backpropped through,
# they raise an error).
#
# Our strategy for detecting if an operator doesn't have an autograd
# kernel at the autograd key is:
# - set the autograd fallback mode to "nothing" (so it does not change
# the required-gradness of outputs)
# - run the operator
# - Check if any outputs of the operator (that are not inputs) require
# grad. This would only happen if the user calls regular PyTorch
# operations in their backend key (this op should instead be
# CompositeImplicitAutograd or not an op) or if the user invokes
# an autograd.Function in the backend key.
#
# Note that it's already likely a bug if the operator directly returns
# an input as output (because custom ops don't have a good way of
# constructing true in-place or out variants), but we defer that
# responsibility to a different test (schema_check).
flat_args = pytree.arg_tree_leaves(*args, **kwargs)
all_tensors = [arg for arg in flat_args if isinstance(arg, torch.Tensor)]
if not any(t.requires_grad for t in all_tensors):
raise RuntimeError(
"autograd_registration_check: no inputs have requires_grad=True so "
"we are unable to actually perform this test. Please pass inputs "
"that do require grad."
)
# Determine which AutogradBACKEND key to check
all_device_types = {arg.device.type for arg in all_tensors}
if not all_device_types.issubset(["cpu", "cuda"]):
# Don't want to support other keys yet
raise NotImplementedError(
f"autograd_registration_check: NYI devices other than CPU/CUDA, got {all_device_types}"
)
if "cuda" in all_device_types:
key = "AutogradCUDA"
elif "cpu" in all_device_types:
key = "AutogradCPU"
if torch._C._dispatch_has_kernel_for_dispatch_key(op.name(), key):
return
if torch._C._dispatch_has_kernel_for_dispatch_key(op.name(), "Autograd"):
return
if torch._C._dispatch_has_kernel_for_dispatch_key(
op.name(), "CompositeImplicitAutograd"
):
return
# At this point, we know the operator doesn't have a kernel registered to an
# autograd key. Let's proceed with our test.
with set_autograd_fallback_mode("nothing"):
all_outs = op(*args, **kwargs)
inp_ids = {id(arg) for arg in flat_args}
def not_an_input_and_requires_grad(tensor):
if not tensor.requires_grad:
return False
if id(tensor) in inp_ids:
return False
return True
if not pytree.tree_any_only(torch.Tensor, not_an_input_and_requires_grad, all_outs):
return
raise AssertionError(
f"{op.name()}: at least one output of this operator has requires_grad=True "
f"but the operator does not have an autograd kernel defined at an autograd "
f"key (e.g. DispatchKey::Autograd). This could mean that you have "
f"incorrectly registered an autograd kernel to a non-Autograd DispatchKey, "
f"which may lead to silently incorrect results. If your operator consists "
f"of regular PyTorch operations, consider not using an operator at all "
f"or registering your operator as CompositeImplicitAutograd. If you have "
f"an autograd.Function registered to a backend (CPU/CUDA) key, the correct "
f"location for it is the Autograd key."
)
```
|
======================================================================================================================================
SOURCE CODE FILE: fake_tensor.py
LINES: 1
SIZE: 0.26 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\optests\fake_tensor.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import torch._subclasses
def is_builtin(op):
return op.namespace in ('aten', 'prims', 'prim')
def fake_check(op, args, kwargs):
with torch._subclasses.CrossRefFakeMode(ignore_op_fn=is_builtin):
op(*args, **kwargs)
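# Hedged usage sketch (editorial addition; torch.ops.mylib.foo.default is an
# illustrative custom op, not a real one): fake_check runs the op under
# CrossRefFakeMode, cross-referencing real outputs against the fake-tensor
# (meta) implementation while ignoring builtin aten/prims ops:
#
#     fake_check(torch.ops.mylib.foo.default, (torch.randn(3),), {})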
```
|
=========================================================================================================================================
SOURCE CODE FILE: generate_tests.py
LINES: 24
SIZE: 31.86 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\optests\generate_tests.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import datetime
import difflib
import functools
import inspect
import json
import os
import re
import tempfile
import threading
import unittest
from collections.abc import Sequence
from typing import Any, Callable, Optional, Union
import torch
import torch._dynamo
import torch.utils._pytree as pytree
from torch._dynamo.utils import clone_input
from torch._library.custom_ops import CustomOpDef
from torch._subclasses.schema_check_mode import SchemaCheckMode
from torch._utils_internal import get_file_path_2
from torch.overrides import TorchFunctionMode
from torch.testing._internal.optests import (
aot_autograd_check,
autograd_registration_check,
fake_check,
)
def dontGenerateOpCheckTests(reason: str):
def inner(fun):
fun._torch_dont_generate_opcheck_tests = True
return fun
return inner
def is_abstract(tensor: torch.Tensor) -> bool:
if tensor.is_meta:
return True
if torch._subclasses.fake_tensor.is_fake(tensor):
return True
return False
def safe_schema_check(
op: torch._ops.OpOverload,
args: tuple[Any, ...],
kwargs: dict[str, Any],
*,
copy_inputs: bool = True,
rtol: Optional[float] = None,
atol: Optional[float] = None,
) -> Any:
if copy_inputs:
args, kwargs = deepcopy_tensors((args, kwargs))
if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)):
return None
with SchemaCheckMode():
result = op(*args, **kwargs)
return result
def safe_autograd_registration_check(
op: torch._ops.OpOverload,
args: tuple[Any, ...],
kwargs: dict[str, Any],
*,
copy_inputs: bool = True,
rtol: Optional[float] = None,
atol: Optional[float] = None,
) -> None:
if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)):
return
if copy_inputs:
args, kwargs = deepcopy_tensors((args, kwargs))
# Don't perform autograd_registration_check if none of the inputs require grad.
if not pytree.tree_any_only(
torch.Tensor, lambda x: x.requires_grad, (args, kwargs)
):
return
return autograd_registration_check(op, args, kwargs)
def safe_fake_check(
op: torch._ops.OpOverload,
args: tuple[Any, ...],
kwargs: dict[str, Any],
*,
copy_inputs: bool = True,
rtol: Optional[float] = None,
atol: Optional[float] = None,
) -> None:
if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)):
return None
if copy_inputs:
args, kwargs = deepcopy_tensors((args, kwargs))
return fake_check(op, args, kwargs)
def safe_aot_autograd_check(
op: torch._ops.OpOverload,
args: tuple[Any, ...],
kwargs: dict[str, Any],
dynamic: bool,
*,
copy_inputs: bool = True,
rtol: Optional[float] = None,
atol: Optional[float] = None,
) -> Any:
# NB: copy_inputs does nothing for aot_autograd_check: it always needs to copy
# inputs.
if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)):
return None
def func(*args, **kwargs):
args, kwargs = pytree.tree_map_only(torch.Tensor, torch.clone, (args, kwargs))
return op(*args, **kwargs)
# aot_autograd_check runs func(*args, **kwargs) multiple times
# and assumes `func` does not modify its inputs.
if rtol and atol:
assert_equals_fn = functools.partial(
torch.testing.assert_close, rtol=rtol, atol=atol
)
else:
assert_equals_fn = torch.testing.assert_close
return aot_autograd_check(
func,
args,
kwargs,
dynamic,
check_gradients="auto",
assert_equals_fn=assert_equals_fn,
)
def deepcopy_tensors(inputs: Any) -> Any:
return pytree.tree_map_only(torch.Tensor, clone_input, inputs)
# Test util requirements
# - The test util must have signature (op: OpOverload, args, kwargs)
# - The test util must NOT mutate args, kwargs.
# - The test utils in this list must not be prefixes of each other. For example,
# having both "test_schema" and "test_schema_is_functional" is NOT OK.
# - The order of items in this dict matters (for opcheck), we'll run them
# in order.
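# Hedged sketch (editorial addition) of the shape a test util is expected to
# take, matching the safe_* helpers above (the name is illustrative):
#
#     def my_test_util(op, args, kwargs, *, copy_inputs=True, rtol=None, atol=None):
#         ...  # must not mutate args or kwargs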
ALL_TEST_UTILS = {
"test_schema": safe_schema_check,
"test_autograd_registration": safe_autograd_registration_check,
"test_faketensor": safe_fake_check,
"test_aot_dispatch_static": functools.partial(
safe_aot_autograd_check,
dynamic=False,
),
"test_aot_dispatch_dynamic": functools.partial(
safe_aot_autograd_check,
dynamic=True,
),
}
GDOC = "https://docs.google.com/document/d/1Pj5HRZvdOq3xpFpbEjUZp2hBovhy7Wnxw14m6lF2154/edit"
DEFAULT_TEST_UTILS = [
"test_schema",
"test_autograd_registration",
"test_faketensor",
"test_aot_dispatch_dynamic",
]
DEPRECATED_DEFAULT_TEST_UTILS = DEFAULT_TEST_UTILS + [
"test_aot_dispatch_static",
]
def generate_opcheck_tests(
testcase: Any,
namespaces: list[str],
failures_dict_path: Optional[str] = None,
additional_decorators: Optional[dict[str, Callable]] = None,
test_utils: list[str] = DEFAULT_TEST_UTILS,
) -> None:
"""Given an existing TestCase, use the existing tests to generate
additional validation tests for custom operators.
For {all existing tests in the TestCase} x {all test utils},
we will generate one new test. The new test runs a TorchFunctionMode
that intercepts ``op(*args, **kwargs)`` calls and invokes
    ``test_util(op, args, kwargs)``, where ``op`` is an operator.
The test_util that we support are in ALL_TEST_UTILS. They are:
- test_schema: This runs SchemaCheckMode.
- test_autograd_registration: This runs autograd_registration_check.
- test_faketensor: This runs CrossRefFakeMode.
- test_aot_dispatch_static: This runs aot_autograd_check, which:
checks that the outputs (and gradients, if they are computable)
are the same under eager-mode PyTorch and using AOTAutograd.
    - test_aot_dispatch_dynamic: Same as test_aot_dispatch_static, but
runs AOTAutograd using dynamic shapes instead of static shapes.
The generated test will have name ``{test_util}__{original_name}``.
For example, if there is a method named ``test_cumsum``, then
we will generate a ``test_schema__test_cumsum``,
``test_faketensor__test_cumsum``, etc.
For more details, see https://docs.google.com/document/d/1Pj5HRZvdOq3xpFpbEjUZp2hBovhy7Wnxw14m6lF2154/edit
Args:
testcase: The testcase we will modify and generate additional tests for.
namespaces: We will only intercept calls to custom operators with these
namespaces.
failures_dict_path: See ``validate_failures_dict_structure`` for more details
test_utils: a list of test_utils to generate. Example: ["test_schema", "test_faketensor"]
"""
if additional_decorators is None:
additional_decorators = {}
test_methods = [
m
for m in dir(testcase)
if m.startswith("test_") and callable(getattr(testcase, m))
]
if failures_dict_path is None:
# The default failures_dict_path is failures_dict.json in
# the same directory as the test file.
prev_frame = inspect.currentframe().f_back
filename = inspect.getframeinfo(prev_frame)[0]
failures_dict_path = get_file_path_2(
os.path.dirname(filename), "failures_dict.json"
)
failures_dict = FailuresDict.load(
failures_dict_path, create_file=should_update_failures_dict()
)
validate_failures_dict_structure(failures_dict, test_utils, testcase)
validate_failures_dict_formatting(failures_dict_path)
def construct_method(attr, prefix, tester):
method = getattr(testcase, attr)
if getattr(method, "_torch_dont_generate_opcheck_tests", False):
return
new_method_name = prefix + "__" + attr
@functools.wraps(method)
def new_method(*args, **kwargs):
with OpCheckMode(
namespaces,
prefix,
tester,
failures_dict,
f"{testcase.__name__}.{new_method_name}",
failures_dict_path,
):
result = method(*args, **kwargs)
return result
if pytestmark := new_method.__dict__.get("pytestmark"):
import pytest
# check if we need to simplify the parametrize marks
# NB: you need to add this mark to your pytest.ini
opcheck_only_one = False
for mark in pytestmark:
if isinstance(mark, pytest.Mark) and mark.name == "opcheck_only_one":
opcheck_only_one = True
if opcheck_only_one:
new_pytestmark = []
for mark in pytestmark:
if isinstance(mark, pytest.Mark) and mark.name == "parametrize":
argnames, argvalues = mark.args
assert not mark.kwargs, "NYI"
# Special case for device, we want to run on all
# devices
if argnames != "device":
new_pytestmark.append(
pytest.mark.parametrize(
argnames, (next(iter(argvalues)),)
)
)
continue
new_pytestmark.append(mark)
new_method.__dict__["pytestmark"] = new_pytestmark
if new_method_name in additional_decorators:
for dec in additional_decorators[new_method_name]:
new_method = dec(new_method)
if hasattr(testcase, new_method_name):
raise RuntimeError(
f"Tried to autogenerate {new_method_name} but {testcase} already "
f"has method named {new_method_name}. Please rename the original "
f"method on the TestCase."
)
setattr(testcase, new_method_name, new_method)
test_utils = {name: ALL_TEST_UTILS[name] for name in test_utils}
for attr in test_methods:
for prefix, tester in test_utils.items():
construct_method(attr, prefix, tester)
generate_tag_tests(testcase, failures_dict, additional_decorators)
def generate_tag_tests(testcase, failures_dict, additional_decorators):
def generate_test(qualname, definitely_not_pt2_compliant, xfailed_tests):
def inner(self):
try:
op = torch._library.utils.lookup_op(qualname)
except AttributeError as e:
# Operator not importable in this test file
raise unittest.SkipTest(f"Can't import operator {qualname}") from e
op_marked_as_compliant = torch.Tag.pt2_compliant_tag in op.tags
if not op_marked_as_compliant:
return
if not definitely_not_pt2_compliant:
return
raise AssertionError(
f"op '{qualname}' was tagged with torch.Tag.pt2_compliant_tag "
f"but it failed some of the generated opcheck tests "
f"({xfailed_tests}). This may lead to silent correctness issues, "
f"please fix this."
)
return inner
for qualname, test_dict in failures_dict.data.items():
xfailed_tests = [
test
for test, status_dict in test_dict.items()
# We're about to delete the following test after Ed's PR
# to specialize on C++ .size() calls
if "test_aot_dispatch_static" not in test
and status_dict["status"] == "xfail"
]
definitely_not_pt2_compliant = len(xfailed_tests) > 0
generated = generate_test(qualname, definitely_not_pt2_compliant, xfailed_tests)
# Could result in collisions, but unlikely. We'll raise if we see one below.
mangled_qualname = qualname.replace("::", "_").replace(".", "_")
test_name = "test_pt2_compliant_tag_" + mangled_qualname
# You can skip this test via the additional_decorators argument
# in generate_opcheck_tests
if test_name in additional_decorators:
for decorator in additional_decorators[test_name]:
generated = decorator(generated)
if hasattr(testcase, test_name):
raise RuntimeError(
f"Tried to generate a test named {test_name}, but it exists "
f"already. This could be because of a name collision (where "
f"we generated two tests with the same name), or where we "
f"generated a test with the same name as an existing test."
)
setattr(testcase, test_name, generated)
TEST_OPTIONS = ("xfail", "skip", "xsuccess")
def validate_failures_dict_formatting(failures_dict_path: str) -> None:
with open(failures_dict_path) as fp:
actual = fp.read()
failures_dict = FailuresDict.load(failures_dict_path)
expected = failures_dict._save(to_str=True)
if actual == expected:
return
if should_update_failures_dict():
failures_dict = FailuresDict.load(failures_dict_path)
failures_dict.save()
return
expected = expected.splitlines(1)
actual = actual.splitlines(1)
diff = difflib.unified_diff(actual, expected)
diff = "".join(diff)
raise RuntimeError(
f"\n{diff}\n\nExpected the failures dict to be formatted "
f"a certain way. Please see the above diff; you can correct "
f"this either manually or by re-running the test with "
f"PYTORCH_OPCHECK_ACCEPT=1"
)
def validate_failures_dict_structure(
failure_dict: "FailuresDict", test_utils: list[str], testcase: Any
) -> None:
"""Validates the failures dict.
The failure dict looks something like the following.
It maps operator name (qualname) to a list of autogenerated tests.
Each autogenerated test may have a check for the operator (if the operator is
called by the test); the dictionary specifies if we should skip the check,
or if we expect some check to fail.
{
"fbgemm::split_lengths": {
"test_schema__test_split_lengths": {
"comment": "you can put whatever you want into the comment section",
"status": "xfail",
            },
"test_schema__test_split_lengths_empty": {
"comment": "",
"status": "skip",
},
},
"fbgemm::gather_lengths": {
"test_schema__test_gather_lengths": {
"comment": "",
"status": "skip",
},
},
}
"""
failure_dict = failure_dict.data
for test_to_option in failure_dict.values():
for test_name, test_dict in test_to_option.items():
if set(test_dict.keys()) != set({"comment", "status"}):
raise RuntimeError(
"in failures_dict, expected sub-dict to have keys 'comment' and 'status'"
)
test_option = test_dict["status"]
if test_option not in TEST_OPTIONS:
raise RuntimeError(
f"In failures_dict, got status={test_option} but it needs to be in {TEST_OPTIONS}"
)
test_class, actual_test_name = test_name.split(".")
if not any(actual_test_name.startswith(test) for test in test_utils):
raise RuntimeError(
f"In failures_dict, test name '{test_name}' should begin with one of {test_utils}"
)
for test in test_utils:
if not actual_test_name.startswith(test):
continue
base_test_name = actual_test_name[len(test) + 2 :]
# remove potential pytest parametrization suffix
base_test_name = re.sub(r"\[.*\]", "", base_test_name)
if testcase.__name__ != test_class:
continue
if hasattr(testcase, base_test_name):
continue
raise RuntimeError(
f"In failures dict, got test name '{test_name}'. We parsed this as "
f"running test '{test}' on '{base_test_name}', but "
f"{base_test_name} does not exist on the TestCase '{testcase.__name__}]. "
f"Maybe you need to change the test name?"
)
def should_update_failures_dict() -> bool:
key = "PYTORCH_OPCHECK_ACCEPT"
return key in os.environ and os.environ[key] == "1"
_is_inside_opcheck_mode = threading.local()
_is_inside_opcheck_mode.value = False
def is_inside_opcheck_mode():
return _is_inside_opcheck_mode.value
class OpCheckMode(TorchFunctionMode):
"""
For a given test, OpCheckMode intercepts calls to operators and runs
test_util(op, args, kwargs) for each intercepted (op, args, kwargs).
"""
def __init__(
self,
namespaces: list[str],
test_util_name: str,
test_util: Callable,
failures_dict: "FailuresDict",
test_name: str,
failures_dict_path: str,
):
# We will intercept calls to ops with these namespaces
self.namespaces = namespaces
# The test utility function. Its signature should be (op, args, kwargs) -> None.
# Examples of test utilities are: schema_check, make_fx_check
self.test_util = test_util
self.test_util_name = test_util_name
# The name of the test that is running this OpCheckMode.
self.test_name = test_name
# Maps qualname -> test_name -> skip/xfail
# Tells us if we should skip a test or assert that there is a failure.
self.failures_dict = failures_dict
# Location of the failures dict. Makes it so that the error message is better.
self.failures_dict_path = failures_dict_path
        # OpCheckMode suppresses errors, collects them here, and then raises them on exit.
# Maps qualname -> List[(Exception, func, maybe args, maybe kwargs)]
self.seen_ops_to_errors = {}
def maybe_raise_errors_on_exit(self) -> None:
# Check expected failures first
for qualname in self.seen_ops_to_errors.keys():
option = self.failures_dict.get_status(qualname, self.test_name)
if len(self.seen_ops_to_errors[qualname]) == 0:
if should_update_failures_dict():
self.failures_dict.set_status(
qualname, self.test_name, "xsuccess", comment=""
)
else:
if option == "xfail":
raise OpCheckError(
f"generate_opcheck_tests: Unexpected success for operator "
f"{qualname} on test {self.test_name}. This may mean that "
f"you have fixed this test failure. Please rerun the test with "
f"PYTORCH_OPCHECK_ACCEPT=1 to automatically update the test runner "
f"or manually remove the "
f"expected failure in the failure dict at "
f"{self.failures_dict_path}"
f"For more details, see "
f"{GDOC}"
)
continue
failed_ops = []
for qualname in self.seen_ops_to_errors.keys():
option = self.failures_dict.get_status(qualname, self.test_name)
if option != "xsuccess":
continue
if len(self.seen_ops_to_errors[qualname]) == 0:
continue
failed_ops.append(qualname)
if not failed_ops:
return
if should_update_failures_dict():
for op in failed_ops:
self.failures_dict.set_status(op, self.test_name, "xfail")
return
# Raise from the first error but also report about all of them to make
# recording xfails easier.
ex, op, args, kwargs = self.seen_ops_to_errors[failed_ops[0]][0]
repro_command = generate_repro(
self.test_util_name, op, args, kwargs, save_data=should_print_better_repro()
)
raise OpCheckError(
f"Test generated by `generate_opcheck_tests`, {self.test_name}, "
f"failed on operators {failed_ops}. This usually means that the "
f"operators are not implemented correctly and may lead to silently "
f"incorrect behavior. Set PYTORCH_OPCHECK_PRINT_BETTER_REPRO=1 for a standalone repro, "
f"or please see "
f"{GDOC} "
f"for more recommendations. "
f"To reproduce this problem locally, try to run the following:\n{repro_command}"
) from ex
def __enter__(self, *args, **kwargs):
self.prev_is_opcheck_mode = _is_inside_opcheck_mode.value
self.prev_dynamo_disable = os.environ.get("TORCHDYNAMO_DISABLE", "")
_is_inside_opcheck_mode.value = True
os.environ["TORCHDYNAMO_DISABLE"] = "1"
return super().__enter__(*args, **kwargs)
def __exit__(self, *args, **kwargs):
_is_inside_opcheck_mode.value = self.prev_is_opcheck_mode
os.environ["TORCHDYNAMO_DISABLE"] = self.prev_dynamo_disable
try:
self.maybe_raise_errors_on_exit()
if should_update_failures_dict():
self.failures_dict.save()
finally:
result = super().__exit__(*args, **kwargs)
return result
def run_test_util(self, op, args, kwargs):
try:
self.test_util(op, args, kwargs, copy_inputs=False)
except torch._subclasses.fake_tensor.UnsupportedFakeTensorException:
# We might get here if the input is already a FakeTensor
# or if we're in a torch.compile block. Just ignore these
# since we can't handle them and reporting them as failures
# is too noisy.
pass
def __torch_function__(self, func, types, args=(), kwargs=None):
kwargs = kwargs if kwargs else {}
# Only intercept calls to operators
if not isinstance(func, (torch._ops.OpOverloadPacket, torch._ops.OpOverload)):
return func(*args, **kwargs)
if (
torch.jit.is_tracing()
or torch.jit.is_scripting()
or torch._dynamo.is_compiling()
):
return func(*args, **kwargs)
# Pre-existing code may not use the .default overload. If we see an
# OpOverloadPacket and we cannot resolve the overload, then we just throw
# and ask the user to clarify. Otherwise, we attempt to resolve the overload.
if isinstance(func, torch._ops.OpOverloadPacket):
func = resolve_unique_overload_or_throw(func)
qualname = func.name()
ns = qualname.split("::")[0]
if ns not in self.namespaces:
return func(*args, **kwargs)
args_c, kwargs_c = deepcopy_tensors((args, kwargs))
result = func(*args, **kwargs)
option = self.failures_dict.get_status(qualname, self.test_name)
if option == "xsuccess" or option == "xfail":
            # Suppress all errors during execution. Raise them during __exit__.
try:
if qualname not in self.seen_ops_to_errors:
self.seen_ops_to_errors[qualname] = []
self.run_test_util(func, args_c, kwargs_c)
except Exception as ex:
if should_print_better_repro():
self.seen_ops_to_errors[qualname].append((ex, func, args, kwargs))
else:
self.seen_ops_to_errors[qualname].append((ex, func, None, None))
elif option == "skip":
pass
return result
def should_print_better_repro() -> bool:
"""If set, the tests generated by `generate_opcheck_tests` will print a
repro command on failure.
In order to print the repro command, we need to save some tensors to disk.
These will be saved under the following directory:
{tempfile.gettempdir()}/pytorch_opcheck_safe_to_delete/.
Although this is a temp folder, it will usually not automatically get cleaned
up, so you'll need to manually delete it.
"""
key = "PYTORCH_OPCHECK_PRINT_BETTER_REPRO"
if key not in os.environ:
return False
value = os.environ[key]
return value == "1" or value == 1
def opcheck(
op: Union[torch._ops.OpOverload, torch._ops.OpOverloadPacket, CustomOpDef],
args: tuple[Any, ...],
kwargs: Optional[dict[str, Any]] = None,
*,
test_utils: Union[str, Sequence[str]] = DEFAULT_TEST_UTILS,
raise_exception: bool = True,
rtol: Optional[float] = None,
atol: Optional[float] = None,
) -> dict[str, str]:
"""See torch.library.opcheck for docstring"""
if (rtol is None) ^ (atol is None):
raise ValueError(
"opcheck(op, ...): if you specify one of rtol/atol, you must specify both"
)
if kwargs is None:
kwargs = {}
if isinstance(op, CustomOpDef):
op = op._opoverload
if isinstance(op, torch._ops.OpOverloadPacket):
op = resolve_unique_overload_or_throw(op)
if not isinstance(op, torch._ops.OpOverload):
raise ValueError(
f"opcheck(op, ...): op must be instance of torch._ops.OpOverload, "
f"e.g. torch.ops.aten.sin.default, got {type(op)}"
)
if test_utils == "ALL":
test_utils = tuple(ALL_TEST_UTILS.keys())
if isinstance(test_utils, str):
test_utils = (test_utils,)
if not isinstance(test_utils, (tuple, list)) or not set(test_utils).issubset(
ALL_TEST_UTILS.keys()
):
raise ValueError(
f"opcheck(op, ..., test_utils={test_utils}), expected test_utils "
f"to be subset of {tuple(ALL_TEST_UTILS.keys())} but it was not"
)
results_dict = {}
for test_util in test_utils:
tester = ALL_TEST_UTILS[test_util]
try:
tester(op, args, kwargs, rtol=rtol, atol=atol)
results_dict[test_util] = "SUCCESS"
except Exception as ex:
if raise_exception:
raise OpCheckError(
f"opcheck(op, ...): {test_util} failed with {ex} "
f"(scroll up for stack trace)"
) from ex
results_dict[test_util] = ex
return results_dict
class OpCheckError(Exception):
pass
def generate_repro(
test: str,
op: torch._ops.OpOverload,
args: tuple[Any, ...],
kwargs: dict[str, Any],
*,
save_data: bool,
dry_run: bool = False,
) -> str:
if save_data:
now = datetime.datetime.now()
path = os.path.join(tempfile.gettempdir(), "pytorch_opcheck_safe_to_delete")
unix_timestamp = datetime.datetime.timestamp(now) * 100000
filepath = os.path.join(path, f"repro_{unix_timestamp}.pt")
if not dry_run:
os.makedirs(path, exist_ok=True)
torch.save((args, kwargs), filepath)
args_kwargs = f'args, kwargs = torch.load("{filepath}")'
else:
args_kwargs = (
"# If you rerun your test with PYTORCH_OPCHECK_PRINT_BETTER_REPRO=1\n"
"# we will fill them in same (args, kwargs) as in your test\n"
"args = () # args to the operator\n"
"kwargs = {} # kwargs to the operator"
)
ns, name = op._schema.name.split("::")
overload = op._overloadname
repro_command = (
f"# =========================================================\n"
f"# BEGIN REPRO SCRIPT\n"
f"# =========================================================\n"
f"import torch\n"
f"from torch.testing._internal.optests import opcheck\n"
f"\n"
f"# Make sure you have loaded the library that contains the op\n"
f"# via an import or torch.ops.load_library(...)\n"
f"op = torch.ops.{ns}.{name}.{overload}\n"
f"\n"
f"{args_kwargs}\n"
f'opcheck(op, args, kwargs, test_utils="{test}")\n'
f"# =========================================================\n"
f"# END REPRO SCRIPT\n"
f"# =========================================================\n"
)
return repro_command
def resolve_unique_overload_or_throw(
op: torch._ops.OpOverloadPacket,
) -> torch._ops.OpOverload:
all_schemas = torch._C._jit_get_schemas_for_operator(op._qualified_op_name)
if len(all_schemas) != 1:
raise RuntimeError(
f"opcheck can only test operators without overloads. "
f"Got the following overloads for {op._qualified_op_name}: "
f"{[schema.overload_name for schema in all_schemas]}"
)
overload_name = all_schemas[0].overload_name
if overload_name == "":
return op.default
return getattr(op, overload_name)
DUMP_OPTIONS = {"indent": 2, "sort_keys": True}
FailuresDictData = dict[str, dict[str, dict[str, str]]]
VERSION = 1
DESCRIPTION = (
f"This is a dict containing failures for tests autogenerated by "
f"generate_opcheck_tests. "
f"For more details, please see {GDOC}"
)
class FailuresDict:
def __init__(self, path: str, data: FailuresDictData):
self.path = path
self.data = data
@staticmethod
def load(path, *, create_file=False) -> "FailuresDict":
if create_file and not os.path.exists(path):
result = FailuresDict(path, {})
            result.save()
return result
with open(path) as fp:
contents = fp.read()
if contents.strip() == "":
dct = {
"_description": DESCRIPTION,
"data": {},
"_version": VERSION,
}
else:
dct = json.loads(contents)
assert "data" in dct
assert "_version" in dct and dct["_version"] == VERSION
return FailuresDict(path, dct["data"])
def _save(self, to_str=False) -> Optional[str]:
to_dump = {
"_description": DESCRIPTION,
"data": self.data,
"_version": VERSION,
}
# json.dumps doesn't end with a newline. Let's add one because files
# should end in newlines.
serialized = json.dumps(to_dump, **DUMP_OPTIONS) + "\n"
if to_str:
return serialized
with open(self.path, "w") as fp:
fp.write(serialized)
return None
def save(self) -> None:
return self._save()
def get_status(self, qualname: str, test_name: str) -> str:
if qualname not in self.data:
return "xsuccess"
dct = self.data[qualname]
if test_name not in dct:
return "xsuccess"
return dct[test_name]["status"]
def set_status(
self,
qualname: str,
test_name: str,
status: str,
*,
comment: Optional[str] = None,
):
if qualname not in self.data:
self.data[qualname] = {}
dct = self.data[qualname]
if test_name not in dct:
dct[test_name] = {"status": None, "comment": ""}
if status == "xsuccess":
# The default status is "xsuccess".
del dct[test_name]
else:
dct[test_name]["status"] = status
if comment is not None:
dct[test_name]["comment"] = comment
```
|
==================================================================================================================================
SOURCE CODE FILE: make_fx.py
LINES: 1
SIZE: 3.28 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\optests\make_fx.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._utils import wrapper_set_seed
import torch.utils._pytree as pytree
def make_fx_check(
func,
args,
kwargs,
tracing_mode,
assert_close=torch.testing.assert_close,
randomize_data=False,
):
f, *new_args = handle_sizes_for_dynamic_shapes(func, args, kwargs)
def run(f, *args, **kwargs):
return wrapper_set_seed(f, *args, **kwargs)
traced_f = make_fx(f, tracing_mode=tracing_mode)(*new_args)
msg = (
"op(*args, **kwargs) and make_fx(op)(*args, **kwargs) produced different "
"values. This could mean that your abstract impls (meta/FakeTensor impls) "
"are incorrect, that your operator is not completely traceable (e.g., "
"it relies on some global state), or that there is a bug in make_fx. "
"Note that if you passed a python function (and not an operator) to "
"make_fx_check, it is still possible that the python function will still "
"work with torch.compile because it handles capturing pieces of "
"your python code to compile."
)
# Randomize the data and run the traced graph with it, to catch bugs
# where we may have baked in Tensor data into the trace.
# This is not guaranteed to succeed, because `f` might have preconditions
    # on the values of the inputs, so we just ignore the failure if we used
    # random data.
if randomize_data:
new_args = randomize(new_args)
try:
expected = run(f, *new_args)
except Exception:
if randomize_data:
return
raise
result = run(traced_f, *new_args)
assert_close(result, expected, msg=msg)
# Arguably we should make make_fx promote torch.Size() objects to symbolic shapes.
# Absent that, here is our strategy:
#
# If any argument is a torch.Size(), maybe get dynamic shapes for it by:
# - Create a temporary Tensor whose size is the torch.Size() we want. Note that
# we use an expanded Tensor as we cannot pass "meta" Tensors to make_fx.
# - Pass it to make_fx such that it is converted to a proxy Tensor
# - Unpack the size in the wrapper to get a torch.Size with dynamic shapes (in
# symbolic mode, a no-op otherwise)
def handle_sizes_for_dynamic_shapes(func, args, kwargs):
def f(args, kwargs, extra_args, extra_kwargs):
if extra_args:
for i, t in extra_args:
args[i] = t.size()
if extra_kwargs:
for k, t in extra_kwargs.items():
kwargs[k] = t.size()
return func(*args, **kwargs)
extra_args = []
extra_kwargs = {}
for i, arg in enumerate(args):
if isinstance(arg, torch.Size):
extra_args.append((i, torch.empty(arg, device="cpu")))
for key, value in kwargs.items():
if isinstance(value, torch.Size):
extra_kwargs[key] = torch.empty(value, device="cpu")
return f, args, kwargs, extra_args, extra_kwargs
def randomize(args):
def transform(x):
if not x.dtype.is_floating_point:
return x
return x.detach().clone().uniform_(0, 1).requires_grad_(x.requires_grad)
return pytree.tree_map_only(torch.Tensor, transform, args)
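# Illustrative usage sketch: check a toy function against its make_fx trace.
# The lambda and input below are arbitrary placeholders.
if __name__ == "__main__":
    example_input = torch.randn(3)
    make_fx_check(
        lambda t: t.sin() + 1,
        (example_input,),
        {},
        tracing_mode="real",
        randomize_data=True,
    )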
```
|
====================================================================================================================================================
SOURCE CODE FILE: quantization_torch_package_models.py
LINES: 1
SIZE: 0.96 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\quantization_torch_package_models.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import math
import torch
import torch.nn as nn
class LinearReluFunctionalChild(nn.Module):
def __init__(self, N):
super().__init__()
self.w1 = nn.Parameter(torch.empty(N, N))
self.b1 = nn.Parameter(torch.zeros(N))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = torch.nn.functional.linear(x, self.w1, self.b1)
x = torch.nn.functional.relu(x)
return x
class LinearReluFunctional(nn.Module):
def __init__(self, N):
super().__init__()
self.child = LinearReluFunctionalChild(N)
self.w1 = nn.Parameter(torch.empty(N, N))
self.b1 = nn.Parameter(torch.zeros(N))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = self.child(x)
x = torch.nn.functional.linear(x, self.w1, self.b1)
x = torch.nn.functional.relu(x)
return x
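# Illustrative usage sketch (arbitrary sizes): a plain forward pass through the
# functional linear+relu stack defined above.
if __name__ == "__main__":
    model = LinearReluFunctional(4)
    out = model(torch.randn(2, 4))
    print(out.shape)  # expected: torch.Size([2, 4])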
```
|
================================================================================================================================
SOURCE CODE FILE: static_module.py
LINES: 1
SIZE: 0.90 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\static_module.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# Owner(s): ["module: unknown"]
import torch
class StaticModule:
def __init__(self, scripted):
# this is an nn.Module
if hasattr(scripted, "_c"):
self.static_module = torch._C._jit_to_static_module(scripted._c)
else:
self.static_module = torch._C._jit_to_static_module(scripted.graph)
def __call__(self, *args, **kwargs):
return self.static_module(*args, **kwargs)
def benchmark(self, args, kwargs, warmup_runs, main_runs):
self.static_module.benchmark(args, kwargs, warmup_runs, main_runs)
def runAsync(self, args, kwargs):
return self.static_module.runAsync(args, kwargs)
def benchmark_individual_ops(self, args, kwargs, warmup_runs, main_runs):
return self.static_module.benchmark_individual_ops(
args, kwargs, warmup_runs, main_runs
)
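# Illustrative usage sketch (not called anywhere): StaticModule wraps a
# TorchScript-scripted module and assumes a PyTorch build with static runtime
# support, so treat this as a sketch rather than a guaranteed-working snippet.
def _static_module_example():
    mod = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
    static = StaticModule(torch.jit.script(mod))
    return static(torch.randn(2, 4))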
```
|
=============================================================================================================================
SOURCE CODE FILE: subclasses.py
LINES: 1
SIZE: 2.55 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\subclasses.py
ENCODING: utf-8
```py
# mypy: ignore-errors
from typing import Any, Optional
import torch
import torch.utils._pytree as pytree
from torch._subclasses.fake_tensor import is_fake
from torch.testing._internal.two_tensor import TwoTensor
from torch.utils._python_dispatch import return_and_correct_aliasing
class WrapperSubclass(torch.Tensor):
@staticmethod
def __new__(cls, a, outer_size=None, outer_stride=None):
if outer_size is None:
outer_size = a.size()
if outer_stride is None:
outer_stride = a.stride()
kwargs = {}
kwargs["strides"] = outer_stride
kwargs["storage_offset"] = a.storage_offset()
kwargs["device"] = a.device
kwargs["layout"] = a.layout
kwargs["requires_grad"] = a.requires_grad
kwargs["dtype"] = a.dtype
out = torch.Tensor._make_wrapper_subclass(cls, outer_size, **kwargs)
return out
def __init__(self, a, outer_size=None, outer_stride=None):
self.a = a
def __repr__(self):
return f"WrapperSubclass({repr(self.a)})"
def __tensor_flatten__(self):
return ["a"], None
@staticmethod
def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
assert meta is None
a = inner_tensors["a"]
if is_fake(a):
assert outer_size is not None
assert outer_stride is not None
return WrapperSubclass(a, outer_size, outer_stride)
@classmethod
def __torch_dispatch__(cls, func, types, args, kwargs):
if kwargs is None:
kwargs = {}
args_a = pytree.tree_map_only(WrapperSubclass, lambda x: x.a, args)
kwargs_a = pytree.tree_map_only(WrapperSubclass, lambda x: x.a, kwargs)
out_a = func(*args_a, **kwargs_a)
out_a_flat, spec = pytree.tree_flatten(out_a)
out_flat = [
WrapperSubclass(o_a) if isinstance(o_a, torch.Tensor) else o_a
for o_a in out_a_flat
]
out = pytree.tree_unflatten(out_flat, spec)
from torch._higher_order_ops.cond import cond_op
if func is cond_op:
return out
else:
return return_and_correct_aliasing(func, args, kwargs, out)
def __coerce_same_metadata_as_tangent__(
self, expected_metadata: Any, expected_type: Optional[type] = None
):
if expected_type == type(self.a):
return self.a
elif expected_type is TwoTensor:
return TwoTensor(self.a, self.a.clone())
return None
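# Illustrative usage sketch: wrap a plain tensor and run an op; the result is
# re-wrapped by __torch_dispatch__.
if __name__ == "__main__":
    wrapped = WrapperSubclass(torch.randn(3))
    result = wrapped + 1
    print(type(result).__name__, result.a.shape)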
```
|
=======================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\test_module\__init__.py
ENCODING: utf-8
```py
```
|
=========================================================================================================================================
SOURCE CODE FILE: future_div.py
LINES: 1
SIZE: 0.12 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\test_module\future_div.py
ENCODING: utf-8
```py
# mypy: ignore-errors
def div_int_future():
return 1 / 2
def div_float_future():
return 3.14 / 0.125
```
|
============================================================================================================================================
SOURCE CODE FILE: no_future_div.py
LINES: 1
SIZE: 0.15 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\test_module\no_future_div.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import torch # noqa: F401
def div_int_nofuture():
return 1 / 2
def div_float_nofuture():
return 3.14 / 0.125
```
|
==================================================================================================================================
SOURCE CODE FILE: torchbind_impls.py
LINES: 1
SIZE: 3.90 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\torchbind_impls.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import contextlib
from pathlib import Path
from typing import Optional
import torch
_TORCHBIND_IMPLS_INITIALIZED = False
_TENSOR_QUEUE_GLOBAL_TEST: Optional[torch.ScriptObject] = None
def init_torchbind_implementations():
global _TORCHBIND_IMPLS_INITIALIZED
global _TENSOR_QUEUE_GLOBAL_TEST
if _TORCHBIND_IMPLS_INITIALIZED:
return
load_torchbind_test_lib()
register_fake_operators()
register_fake_classes()
_TENSOR_QUEUE_GLOBAL_TEST = _empty_tensor_queue()
_TORCHBIND_IMPLS_INITIALIZED = True
def _empty_tensor_queue() -> torch.ScriptObject:
return torch.classes._TorchScriptTesting._TensorQueue(
torch.empty(
0,
).fill_(-1)
)
# put these under a function because the corresponding library might not be loaded yet.
def register_fake_operators():
@torch.library.register_fake("_TorchScriptTesting::takes_foo_python_meta")
def fake_takes_foo(foo, z):
return foo.add_tensor(z)
@torch.library.register_fake("_TorchScriptTesting::queue_pop")
def fake_queue_pop(tq):
return tq.pop()
@torch.library.register_fake("_TorchScriptTesting::queue_push")
def fake_queue_push(tq, x):
return tq.push(x)
@torch.library.register_fake("_TorchScriptTesting::queue_size")
def fake_queue_size(tq):
return tq.size()
def meta_takes_foo_list_return(foo, x):
a = foo.add_tensor(x)
b = foo.add_tensor(a)
c = foo.add_tensor(b)
return [a, b, c]
def meta_takes_foo_tuple_return(foo, x):
a = foo.add_tensor(x)
b = foo.add_tensor(a)
return (a, b)
torch.ops._TorchScriptTesting.takes_foo_list_return.default.py_impl(
torch._C.DispatchKey.Meta
)(meta_takes_foo_list_return)
torch.ops._TorchScriptTesting.takes_foo_tuple_return.default.py_impl(
torch._C.DispatchKey.Meta
)(meta_takes_foo_tuple_return)
torch.ops._TorchScriptTesting.takes_foo.default.py_impl(torch._C.DispatchKey.Meta)(
# make signature match original cpp implementation to support kwargs
lambda foo, x: foo.add_tensor(x)
)
def register_fake_classes():
# noqa: F841
@torch._library.register_fake_class("_TorchScriptTesting::_Foo")
class FakeFoo:
def __init__(self, x: int, y: int):
self.x = x
self.y = y
@classmethod
def __obj_unflatten__(cls, flattend_foo):
return cls(**dict(flattend_foo))
def add_tensor(self, z):
return (self.x + self.y) * z
@torch._library.register_fake_class("_TorchScriptTesting::_ContainsTensor")
class FakeContainsTensor:
def __init__(self, t: torch.Tensor):
self.t = t
@classmethod
def __obj_unflatten__(cls, flattend_foo):
return cls(**dict(flattend_foo))
def get(self):
return self.t
def load_torchbind_test_lib():
import unittest
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
find_library_location,
IS_FBCODE,
IS_MACOS,
IS_SANDCASTLE,
IS_WINDOWS,
)
if IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
elif IS_SANDCASTLE or IS_FBCODE:
lib_file_path = Path("//caffe2/test/cpp/jit:test_custom_class_registrations")
elif IS_WINDOWS:
lib_file_path = find_library_location("torchbind_test.dll")
else:
lib_file_path = find_library_location("libtorchbind_test.so")
torch.ops.load_library(str(lib_file_path))
@contextlib.contextmanager
def _register_py_impl_temporarily(op_overload, key, fn):
try:
op_overload.py_impl(key)(fn)
yield
finally:
del op_overload.py_kernels[key]
op_overload._dispatch_cache.clear()
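# Illustrative usage sketch (not called anywhere): assumes the torchbind test
# library can be loaded and that the _TensorQueue test class exposes push/pop,
# mirroring the fake implementations registered above.
def _tensor_queue_example():
    init_torchbind_implementations()
    tq = _empty_tensor_queue()
    tq.push(torch.ones(3))
    return tq.pop()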
```
|
===============================================================================================================================
SOURCE CODE FILE: triton_utils.py
LINES: 1
SIZE: 17.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\triton_utils.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import unittest
from torch.testing._internal.inductor_utils import HAS_CUDA, HAS_GPU
from torch.utils._triton import has_triton
requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")
requires_gpu = unittest.skipUnless(HAS_GPU, "requires gpu")
if has_triton():
import triton
from triton import language as tl
# Define here so that multiple tests can take advantage of it
@triton.jit
def add_kernel(
in_ptr0,
in_ptr1,
out_ptr,
n_elements,
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(in_ptr0 + offsets, mask=mask)
y = tl.load(in_ptr1 + offsets, mask=mask)
output = x + y
tl.store(out_ptr + offsets, output, mask=mask)
@triton.jit
def sub_kernel(
in_ptr0,
in_ptr1,
out_ptr,
n_elements,
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(in_ptr0 + offsets, mask=mask)
y = tl.load(in_ptr1 + offsets, mask=mask)
output = x - y
tl.store(out_ptr + offsets, output, mask=mask)
@triton.jit
def add_kernel_with_optional_param(
in_ptr0,
in_ptr1,
out_ptr,
n_elements,
ARGS_PASSED: "tl.constexpr",
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(in_ptr0 + offsets, mask=mask)
if ARGS_PASSED == "two":
y = tl.load(in_ptr1 + offsets, mask=mask)
output = x + y
else:
output = x
tl.store(out_ptr + offsets, output, mask=mask)
@triton.jit
def add_kernel_with_none_param_and_equal_to_1_arg(
in_ptr0,
in_ptr1, # in_ptr1 could be None
out_ptr,
n_elements,
stride,
ARGS_PASSED: "tl.constexpr",
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(in_ptr0 + offsets * stride, mask=mask)
if ARGS_PASSED == "two":
y = tl.load(in_ptr1 + offsets, mask=mask)
output = x + y
else:
output = x
tl.store(out_ptr + offsets * stride, output, mask=mask)
@triton.autotune(
configs=[
triton.Config({"BLOCK_SIZE": 128}, num_stages=3, num_warps=8),
triton.Config({"BLOCK_SIZE": 128}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_SIZE": 64}, num_stages=3, num_warps=8),
triton.Config({"BLOCK_SIZE": 64}, num_stages=4, num_warps=4),
],
key=[],
)
@triton.jit
def add_kernel_autotuned(
in_ptr0,
in_ptr1,
out_ptr,
n_elements,
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(in_ptr0 + offsets, mask=mask)
y = tl.load(in_ptr1 + offsets, mask=mask)
output = x + y
tl.store(out_ptr + offsets, output, mask=mask)
@triton.autotune(
configs=[
triton.Config({"BLOCK_SIZE": 16}, num_stages=2, num_warps=2),
],
key=[],
)
@triton.jit
def add_kernel_autotuned_weird_param_order(
in_ptr0,
in_ptr1,
n_elements,
BLOCK_SIZE: "tl.constexpr",
out_ptr,
):
# out_ptr is after an autotuned param that's declared as tl.constexpr.
# This param ordering can create bugs if not handled correctly.
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(in_ptr0 + offsets, mask=mask)
y = tl.load(in_ptr1 + offsets, mask=mask)
output = x + y
tl.store(out_ptr + offsets, output, mask=mask)
@triton.autotune(
configs=[
triton.Config(
{"BLOCK_SIZE_X": 128, "BLOCK_SIZE_Y": 128}, num_stages=3, num_warps=8
),
triton.Config(
{"BLOCK_SIZE_X": 128, "BLOCK_SIZE_Y": 128}, num_stages=4, num_warps=4
),
triton.Config(
{"BLOCK_SIZE_X": 64, "BLOCK_SIZE_Y": 64}, num_stages=3, num_warps=8
),
triton.Config(
{"BLOCK_SIZE_X": 64, "BLOCK_SIZE_Y": 64}, num_stages=4, num_warps=4
),
],
key=[],
)
@triton.jit
def add_kernel_2d_autotuned(
in_ptr0,
in_ptr1,
out_ptr,
x_elements,
y_elements,
BLOCK_SIZE_X: "tl.constexpr",
BLOCK_SIZE_Y: "tl.constexpr",
):
xoffset = tl.program_id(0) * BLOCK_SIZE_X
xindex = xoffset + tl.arange(0, BLOCK_SIZE_X)[:, None]
xmask = xindex < x_elements
yoffset = tl.program_id(1) * BLOCK_SIZE_Y
yindex = yoffset + tl.arange(0, BLOCK_SIZE_Y)[None, :]
ymask = yindex < y_elements
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (x1 + (x_elements * y0)), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (y0 + (y_elements * x1)), xmask & ymask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr + (x1 + (x_elements * y0)), tmp2, xmask & ymask)
def _dummy_early_config_prune(configs, *_, **__):
return configs
@triton.autotune(
configs=[
triton.Config({"BLOCK_SIZE": 128}, num_stages=3, num_warps=8),
triton.Config({"BLOCK_SIZE": 64}, num_stages=4, num_warps=4),
],
key=[],
warmup=10,
rep=20,
prune_configs_by={"early_config_prune": _dummy_early_config_prune},
)
@triton.jit
def add_kernel_autotuned_with_unsupported_args(
in_ptr0,
in_ptr1,
out_ptr,
n_elements,
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(in_ptr0 + offsets, mask=mask)
y = tl.load(in_ptr1 + offsets, mask=mask)
output = x + y
tl.store(out_ptr + offsets, output, mask=mask)
@triton.jit
def add_kernel_with_scaling(
in_ptr0,
in_ptr1,
out_ptr,
n_elements,
scaling_factor,
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(in_ptr0 + offsets, mask=mask)
y = tl.load(in_ptr1 + offsets, mask=mask)
output = (x + y) * scaling_factor
tl.store(out_ptr + offsets, output, mask=mask)
@triton.jit
def add_kernel_with_tma_1d(
in_desc_ptr0,
in_desc_ptr1,
out_desc_ptr,
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
offset = pid * BLOCK_SIZE
a = tl._experimental_descriptor_load(
in_desc_ptr0,
[offset],
[BLOCK_SIZE],
tl.float32,
)
b = tl._experimental_descriptor_load(
in_desc_ptr1,
[offset],
[BLOCK_SIZE],
tl.float32,
)
output = a + b
tl._experimental_descriptor_store(
out_desc_ptr,
output,
[offset],
)
@triton.jit
def add_kernel_with_tma_2d(
in_desc_ptr0,
in_desc_ptr1,
out_desc_ptr,
BLOCK_SIZE_X: "tl.constexpr",
BLOCK_SIZE_Y: "tl.constexpr",
):
pid_x = tl.program_id(axis=0)
pid_y = tl.program_id(axis=1)
offset_x = pid_x * BLOCK_SIZE_X
offset_y = pid_y * BLOCK_SIZE_Y
x = tl._experimental_descriptor_load(
in_desc_ptr0,
[offset_x, offset_y],
[BLOCK_SIZE_X, BLOCK_SIZE_Y],
tl.float32,
)
y = tl._experimental_descriptor_load(
in_desc_ptr1,
[offset_x, offset_y],
[BLOCK_SIZE_X, BLOCK_SIZE_Y],
tl.float32,
)
output = x + y
tl._experimental_descriptor_store(
out_desc_ptr,
output,
[offset_x, offset_y],
)
@triton.jit
def mul2_kernel(
in_ptr0,
out_ptr,
n_elements,
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(in_ptr0 + offsets, mask=mask)
output = 2 * x
tl.store(out_ptr + offsets, output, mask=mask)
@triton.jit
def mul2_inplace_kernel(
ptr,
n_elements,
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(ptr + offsets, mask=mask)
output = 2 * x
tl.store(ptr + offsets, output, mask=mask)
@triton.jit
def zero_negs(x):
return tl.where(x >= 0, x, 0)
@triton.jit
def indirection_kernel(
in_ptr0,
out_ptr,
n_elements,
BLOCK_SIZE: "tl.constexpr",
ACTIVATION: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
if ACTIVATION == "mul2_inplace_kernel":
mul2_inplace_kernel(in_ptr0, n_elements, BLOCK_SIZE=BLOCK_SIZE)
elif ACTIVATION == "add_kernel":
add_kernel(in_ptr0, in_ptr0, out_ptr, n_elements, BLOCK_SIZE=BLOCK_SIZE)
x = tl.load(in_ptr0 + offsets, mask=mask)
tl.store(out_ptr + offsets, x, mask=mask)
@triton.jit
def double_strided_kernel(
in_ptr,
out_ptr,
in_y_stride,
out_y_stride,
X_BLOCK_SIZE: "tl.constexpr",
Y_BLOCK_SIZE: "tl.constexpr",
):
xid = tl.program_id(axis=0)
yid = tl.program_id(axis=1)
x_start = xid * X_BLOCK_SIZE
y_start = yid * Y_BLOCK_SIZE
x_offsets = x_start + tl.arange(0, X_BLOCK_SIZE)
y_offsets = y_start + tl.arange(0, Y_BLOCK_SIZE)
src_offsets = y_offsets[:, None] * in_y_stride + x_offsets[None, :]
dst_offsets = y_offsets[:, None] * out_y_stride + x_offsets[None, :]
src = tl.load(in_ptr + src_offsets)
tl.store(out_ptr + dst_offsets, src * 2.0)
@triton.jit
def inline_asm_kernel_is_pure_true(
X, Y, Z, n: "tl.constexpr", BLOCK: "tl.constexpr"
):
x = tl.load(X + tl.arange(0, BLOCK))
y = tl.load(Y + tl.arange(0, BLOCK))
s = tl.full([BLOCK], n, tl.int32)
z = tl.inline_asm_elementwise(
"shf.l.wrap.b32 $0, $1, $2, $3;",
"=r,r, r, r",
[x, y, s],
dtype=tl.int32,
is_pure=True,
pack=1,
)
tl.store(Z + tl.arange(0, BLOCK), z)
@triton.jit
def inline_asm_kernel_is_pure_false(
X, Y, Z, n: "tl.constexpr", BLOCK: "tl.constexpr"
):
x = tl.load(X + tl.arange(0, BLOCK))
y = tl.load(Y + tl.arange(0, BLOCK))
s = tl.full([BLOCK], n, tl.int32)
z = tl.inline_asm_elementwise(
"shf.l.wrap.b32 $0, $1, $2, $3;",
"=r,r, r, r",
[x, y, s],
dtype=tl.int32,
is_pure=False,
pack=1,
)
tl.store(Z + tl.arange(0, BLOCK), z)
@triton.jit
def add_kernel_with_block_ptr(
x_ptr,
y_ptr,
output_ptr,
n_elements,
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
x = tl.load(
tl.make_block_ptr(
base=x_ptr,
shape=[n_elements],
strides=[1],
offsets=[block_start],
block_shape=[BLOCK_SIZE],
order=[0],
),
boundary_check=[0],
)
y = tl.load(
tl.make_block_ptr(
base=y_ptr,
shape=[n_elements],
strides=[1],
offsets=[block_start],
block_shape=[BLOCK_SIZE],
order=[0],
),
boundary_check=[0],
)
output = x + y
tl.store(
tl.make_block_ptr(
base=output_ptr,
shape=[n_elements],
strides=[1],
offsets=[block_start],
block_shape=[BLOCK_SIZE],
order=[0],
),
output,
boundary_check=[0],
)
@triton.jit
def kernel_with_block_ptr_2d(
x_ptr,
output_ptr,
n_elements,
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
x = tl.load(
tl.make_block_ptr(
base=x_ptr,
shape=[n_elements, 1],
strides=[1, 1],
offsets=[block_start, 0],
block_shape=[BLOCK_SIZE, 1],
order=[1, 0],
),
boundary_check=[0],
)
output = x
tl.store(
tl.make_block_ptr(
base=output_ptr,
shape=[n_elements, 1],
strides=[1, 1],
offsets=[block_start, 0],
block_shape=[BLOCK_SIZE, 1],
order=[1, 0],
),
output,
boundary_check=[0],
)
from triton.language import load, store
@triton.jit
def add_kernel_with_import(
in_ptr0,
in_ptr1,
out_ptr,
n_elements,
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = load(in_ptr0 + offsets, mask=mask)
y = load(in_ptr1 + offsets, mask=mask)
output = x + y
store(out_ptr + offsets, output, mask=mask)
@triton.jit
def cond_op_kernel(
in_ptr0,
in_ptr1,
out_ptr,
n_elements,
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(in_ptr0 + offsets, mask=mask)
y = tl.load(in_ptr1 + offsets, mask=mask)
if tl.program_id(0) == 0:
output = x + y
else:
output = x * y
tl.store(out_ptr + offsets, output, mask=mask)
@triton.jit
def atomic_add_kernel(
in_ptr0,
in_ptr1,
out_ptr,
n_elements,
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(in_ptr0 + offsets, mask=mask)
y = tl.load(in_ptr1 + offsets, mask=mask)
output = x + y
tl.atomic_add(out_ptr + offsets, output, mask=mask)
@triton.jit
def add_4_times_kernel(
in_ptr0,
in_ptr1,
out_ptr,
n_elements,
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(in_ptr0 + offsets, mask=mask)
y = tl.load(in_ptr1 + offsets, mask=mask)
for i in range(2):
output = x + y
tl.store(out_ptr + offsets, output, mask=mask)
i = 2
while i > 0:
i -= 1
output = x + y
tl.store(out_ptr + offsets, output, mask=mask)
@triton.jit
def add_kernel_out_of_order_fn2(
in_ptr0,
in_ptr1,
n_elements,
out_ptr,
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(in_ptr0 + offsets, mask=mask)
y = tl.load(in_ptr1 + offsets, mask=mask)
output = x + y
tl.store(out_ptr + offsets, output, mask=mask)
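# Illustrative usage sketch (not called anywhere): launching add_kernel needs
# triton and a CUDA device; sizes below are arbitrary.
def _add_kernel_example(n=1024, block_size=128):
    import torch  # torch is not imported at module level in this file
    x = torch.rand(n, device="cuda")
    y = torch.rand(n, device="cuda")
    out = torch.empty_like(x)
    grid = (triton.cdiv(n, block_size),)
    add_kernel[grid](x, y, out, n, BLOCK_SIZE=block_size)
    return out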
```
|
=============================================================================================================================
SOURCE CODE FILE: two_tensor.py
LINES: 1
SIZE: 3.45 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\two_tensor.py
ENCODING: utf-8
```py
# mypy: ignore-errors
import torch
import torch.utils._pytree as pytree
from torch.utils._python_dispatch import return_and_correct_aliasing
# A simple tensor subclass that holds two tensors internally, and runs every op on both tensors.
class TwoTensor(torch.Tensor):
@staticmethod
def __new__(cls, a, b, outer_size=None, outer_stride=None):
if outer_size is None:
outer_size = a.size()
if outer_stride is None:
outer_stride = a.stride()
assert (
a.device == b.device
and a.layout == b.layout
and a.requires_grad == b.requires_grad
and a.dtype == b.dtype
)
# I guess it would be more accurate to represent the shape as torch.cat(a, b).shape
shape = outer_size
kwargs = {}
kwargs["strides"] = outer_stride
kwargs["storage_offset"] = a.storage_offset()
kwargs["device"] = a.device
kwargs["layout"] = a.layout
kwargs["requires_grad"] = a.requires_grad
kwargs["dtype"] = a.dtype
out = torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs)
assert a.shape == b.shape
assert a.stride() == b.stride()
assert a.storage_offset() == b.storage_offset()
return out
def __init__(self, a, b, outer_size=None, outer_stride=None):
self.a = a
self.b = b
def __repr__(self):
a_repr = repr(self.a)
b_repr = repr(self.b)
return f"TwoTensor({a_repr}, {b_repr})"
def __tensor_flatten__(self):
return ["a", "b"], None
@staticmethod
def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
assert meta is None
a, b = inner_tensors["a"], inner_tensors["b"]
if type(a) is torch.Tensor:
assert outer_size is not None
assert outer_stride is not None
return TwoTensor(a, b, outer_size, outer_stride)
@classmethod
def __torch_dispatch__(cls, func, types, args, kwargs):
if kwargs is None:
kwargs = {}
args_a = pytree.tree_map_only(TwoTensor, lambda x: x.a, args)
args_b = pytree.tree_map_only(TwoTensor, lambda x: x.b, args)
kwargs_a = pytree.tree_map_only(TwoTensor, lambda x: x.a, kwargs)
kwargs_b = pytree.tree_map_only(TwoTensor, lambda x: x.b, kwargs)
out_a = func(*args_a, **kwargs_a)
out_b = func(*args_b, **kwargs_b)
out_a_flat, spec = pytree.tree_flatten(out_a)
out_b_flat = pytree.tree_leaves(out_b)
# for aten ops that return non-tensors, just assume that
# our two inner tensors return the same value
out_flat = [
cls(o_a, o_b) if isinstance(o_a, torch.Tensor) else o_a
for o_a, o_b in zip(out_a_flat, out_b_flat)
]
out = pytree.tree_unflatten(out_flat, spec)
from torch._higher_order_ops.cond import cond_op
if func is cond_op:
return out
else:
return return_and_correct_aliasing(func, args, kwargs, out)
def get_elem_a(self):
return self.a
class TwoTensorMode(torch.utils._python_dispatch.TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
out = func(*args, **kwargs)
if torch._subclasses.fake_tensor._is_tensor_constructor(func):
out = TwoTensor(out, out.clone())
return out
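# Illustrative usage sketch: every op runs on both inner tensors, and
# TwoTensorMode wraps freshly constructed tensors automatically.
if __name__ == "__main__":
    t = TwoTensor(torch.ones(2), torch.zeros(2))
    print(t + 1)  # roughly TwoTensor(tensor([2., 2.]), tensor([1., 1.]))
    with TwoTensorMode():
        r = torch.randn(3)
    print(type(r).__name__)  # TwoTensor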
```
|
===============================================================================================================
SOURCE CODE FILE: _utils.py
LINES: 1
SIZE: 2.04 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_utils.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import contextlib
import torch
# Common testing utilities for use in public testing APIs.
# NB: these should all be importable without optional dependencies
# (like numpy and expecttest).
def wrapper_set_seed(op, *args, **kwargs):
"""Wrapper to set seed manually for some functions like dropout
See: https://github.com/pytorch/pytorch/pull/62315#issuecomment-896143189 for more details.
"""
with freeze_rng_state():
torch.manual_seed(42)
output = op(*args, **kwargs)
if isinstance(output, torch.Tensor) and output.device.type == "lazy":
# We need to call mark step inside freeze_rng_state so that numerics
# match eager execution
torch._lazy.mark_step() # type: ignore[attr-defined]
return output
@contextlib.contextmanager
def freeze_rng_state():
# no_dispatch needed for test_composite_compliance
# Some OpInfos use freeze_rng_state for rng determinism, but
# test_composite_compliance overrides dispatch for all torch functions
# which we need to disable to get and set rng state
with torch.utils._mode_utils.no_dispatch(), torch._C._DisableFuncTorch():
rng_state = torch.get_rng_state()
if torch.cuda.is_available():
cuda_rng_state = torch.cuda.get_rng_state()
try:
yield
finally:
# Modes are not happy with torch.cuda.set_rng_state
# because it clones the state (which could produce a Tensor Subclass)
# and then grabs the new tensor's data pointer in generator.set_state.
#
# In the long run torch.cuda.set_rng_state should probably be
# an operator.
#
# NB: Mode disable is to avoid running cross-ref tests on this seeding
with torch.utils._mode_utils.no_dispatch(), torch._C._DisableFuncTorch():
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state) # type: ignore[possibly-undefined]
torch.set_rng_state(rng_state)
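# Illustrative usage sketch: the fixed seed inside wrapper_set_seed makes a
# stochastic op reproducible across calls.
if __name__ == "__main__":
    a = wrapper_set_seed(torch.nn.functional.dropout, torch.ones(4), p=0.5)
    b = wrapper_set_seed(torch.nn.functional.dropout, torch.ones(4), p=0.5)
    assert torch.equal(a, b)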
```
|
==============================================================================================================
SOURCE CODE FILE: torch_version.py
LINES: 1
SIZE: 2.54 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\torch_version.py
ENCODING: utf-8
```py
from collections.abc import Iterable
from typing import Any
from torch._vendor.packaging.version import InvalidVersion, Version
from torch.version import __version__ as internal_version
__all__ = ["TorchVersion"]
class TorchVersion(str):
"""A string with magic powers to compare to both Version and iterables!
Prior to 1.10.0 torch.__version__ was stored as a str and so many did
comparisons against torch.__version__ as if it were a str. In order to not
break them we have TorchVersion which masquerades as a str while also
having the ability to compare against both packaging.version.Version as
    well as tuples of values, e.g. (1, 2, 1)
Examples:
Comparing a TorchVersion object to a Version object
TorchVersion('1.10.0a') > Version('1.10.0a')
Comparing a TorchVersion object to a Tuple object
TorchVersion('1.10.0a') > (1, 2) # 1.2
TorchVersion('1.10.0a') > (1, 2, 1) # 1.2.1
Comparing a TorchVersion object against a string
TorchVersion('1.10.0a') > '1.2'
TorchVersion('1.10.0a') > '1.2.1'
"""
__slots__ = ()
# fully qualified type names here to appease mypy
def _convert_to_version(self, inp: Any) -> Any:
if isinstance(inp, Version):
return inp
elif isinstance(inp, str):
return Version(inp)
elif isinstance(inp, Iterable):
# Ideally this should work for most cases by attempting to group
            # the version tuple, assuming the tuple looks like (MAJOR, MINOR, ?PATCH)
# Examples:
# * (1) -> Version("1")
# * (1, 20) -> Version("1.20")
# * (1, 20, 1) -> Version("1.20.1")
return Version(".".join(str(item) for item in inp))
else:
raise InvalidVersion(inp)
def _cmp_wrapper(self, cmp: Any, method: str) -> bool:
try:
return getattr(Version(self), method)(self._convert_to_version(cmp))
except BaseException as e:
if not isinstance(e, InvalidVersion):
raise
# Fall back to regular string comparison if dealing with an invalid
# version like 'parrot'
return getattr(super(), method)(cmp)
for cmp_method in ["__gt__", "__lt__", "__eq__", "__ge__", "__le__"]:
setattr(
TorchVersion,
cmp_method,
lambda x, y, method=cmp_method: x._cmp_wrapper(y, method),
)
__version__ = TorchVersion(internal_version)
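# Illustrative usage sketch: comparisons work against tuples, strings, and
# packaging Versions, while TorchVersion still behaves like a plain str.
if __name__ == "__main__":
    v = TorchVersion("1.10.0a")
    assert v > (1, 9) and v > "1.9.1" and isinstance(v, str)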
```
|
======================================================================================================
SOURCE CODE FILE: types.py
LINES: 1
SIZE: 3.73 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\types.py
ENCODING: utf-8
```py
# In some cases, these basic types are shadowed by corresponding
# top-level values. The underscore variants let us refer to these
# types. See https://github.com/python/mypy/issues/4146 for why these
# workarounds are necessary
import os
from builtins import ( # noqa: F401
bool as _bool,
bytes as _bytes,
complex as _complex,
float as _float,
int as _int,
str as _str,
)
from collections.abc import Sequence
from typing import Any, IO, TYPE_CHECKING, Union
from typing_extensions import Self, TypeAlias
# `as` imports have better static analysis support than assignment `ExposedType: TypeAlias = HiddenType`
from torch import ( # noqa: F401
device as _device,
DispatchKey as DispatchKey,
dtype as _dtype,
layout as _layout,
qscheme as _qscheme,
Size as Size,
SymBool as SymBool,
SymFloat as SymFloat,
SymInt as SymInt,
Tensor as Tensor,
)
if TYPE_CHECKING:
from torch.autograd.graph import GradientEdge
__all__ = ["Number", "Device", "FileLike", "Storage"]
# Convenience aliases for common composite types that we need
# to talk about in PyTorch
_TensorOrTensors: TypeAlias = Union[Tensor, Sequence[Tensor]] # noqa: PYI047
_TensorOrTensorsOrGradEdge: TypeAlias = Union[ # noqa: PYI047
Tensor,
Sequence[Tensor],
"GradientEdge",
Sequence["GradientEdge"],
]
_size: TypeAlias = Union[Size, list[int], tuple[int, ...]] # noqa: PYI042,PYI047
_symsize: TypeAlias = Union[Size, Sequence[Union[int, SymInt]]] # noqa: PYI042,PYI047
_dispatchkey: TypeAlias = Union[str, DispatchKey] # noqa: PYI042,PYI047
# int or SymInt
IntLikeType: TypeAlias = Union[int, SymInt]
# float or SymFloat
FloatLikeType: TypeAlias = Union[float, SymFloat]
# bool or SymBool
BoolLikeType: TypeAlias = Union[bool, SymBool]
py_sym_types = (SymInt, SymFloat, SymBool) # left un-annotated intentionally
PySymType: TypeAlias = Union[SymInt, SymFloat, SymBool]
# Meta-type for "numeric" things; matches our docs
Number: TypeAlias = Union[int, float, bool]
# tuple for isinstance(x, Number) checks.
# FIXME: refactor once python 3.9 support is dropped.
_Number = (int, float, bool)
FileLike: TypeAlias = Union[str, os.PathLike[str], IO[bytes]]
# Meta-type for "device-like" things. Not to be confused with 'device' (a
# literal device object). This nomenclature is consistent with PythonArgParser.
# None means use the default device (typically CPU)
Device: TypeAlias = Union[_device, str, int, None]
# Storage protocol implemented by ${Type}StorageBase classes
class Storage:
_cdata: int
device: _device
dtype: _dtype
_torch_load_uninitialized: bool
def __deepcopy__(self, memo: dict[int, Any]) -> Self:
raise NotImplementedError
def _new_shared(self, size: int) -> Self:
raise NotImplementedError
def _write_file(
self,
f: Any,
is_real_file: bool,
save_size: bool,
element_size: int,
) -> None:
raise NotImplementedError
def element_size(self) -> int:
raise NotImplementedError
def is_shared(self) -> bool:
raise NotImplementedError
def share_memory_(self) -> Self:
raise NotImplementedError
def nbytes(self) -> int:
raise NotImplementedError
def cpu(self) -> Self:
raise NotImplementedError
def data_ptr(self) -> int:
raise NotImplementedError
def from_file(
self,
filename: str,
shared: bool = False,
nbytes: int = 0,
) -> Self:
raise NotImplementedError
def _new_with_file(
self,
f: Any,
element_size: int,
) -> Self:
raise NotImplementedError
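# Illustrative usage sketch (hypothetical helper): the aliases above are meant
# for annotations, e.g. accepting anything "size-like" or "device-like".
def _example_empty(shape: _size, device: Device = None) -> Tensor:
    import torch  # the full torch module is not imported at module level here
    return torch.empty(shape, device=device)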
```
|
===============================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 4.08 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\__init__.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import copyreg
import os.path as _osp
import weakref
import torch
from torch.utils import (
backcompat as backcompat,
collect_env as collect_env,
data as data,
deterministic as deterministic,
hooks as hooks,
)
from torch.utils.backend_registration import (
generate_methods_for_privateuse1_backend,
rename_privateuse1_backend,
)
from torch.utils.cpp_backtrace import get_cpp_backtrace
from torch.utils.throughput_benchmark import ThroughputBenchmark
def set_module(obj, mod):
"""
    Set the module attribute on a python object for nicer printing.
"""
if not isinstance(mod, str):
raise TypeError("The mod argument should be a string")
obj.__module__ = mod
if torch._running_with_deploy():
# not valid inside torch_deploy interpreter, no paths exists for frozen modules
cmake_prefix_path = None
else:
cmake_prefix_path = _osp.join(
_osp.dirname(_osp.dirname(__file__)), "share", "cmake"
)
def swap_tensors(t1, t2):
"""
This function swaps the content of the two Tensor objects.
At a high level, this will make t1 have the content of t2 while preserving
its identity.
This will not work if t1 and t2 have different slots.
"""
# Ensure there are no weakrefs
if weakref.getweakrefs(t1):
raise RuntimeError("Cannot swap t1 because it has weakref associated with it")
if weakref.getweakrefs(t2):
raise RuntimeError("Cannot swap t2 because it has weakref associated with it")
t1_slots = set(copyreg._slotnames(t1.__class__)) # type: ignore[attr-defined]
t2_slots = set(copyreg._slotnames(t2.__class__)) # type: ignore[attr-defined]
if t1_slots != t2_slots:
raise RuntimeError("Cannot swap t1 and t2 if they have different slots")
def swap_attr(name):
tmp = getattr(t1, name)
setattr(t1, name, (getattr(t2, name)))
setattr(t2, name, tmp)
def error_pre_hook(grad_outputs):
raise RuntimeError(
"Trying to execute AccumulateGrad node that was poisoned by swap_tensors "
"this can happen when you try to run backward on a tensor that was swapped. "
"For a module m with `torch.__future__.set_swap_module_params_on_conversion(True)` "
"you should not change the device or dtype of the module (e.g. `m.cpu()` or `m.half()`) "
"between running forward and backward. To resolve this, please only change the "
"device/dtype before running forward (or after both forward and backward)."
)
def check_use_count(t, name="t1"):
use_count = t._use_count()
error_str = (
f"Expected use_count of {name} to be 1 or 2 with an AccumulateGrad node but got {use_count} "
f"make sure you are not holding references to the tensor in other places."
)
if use_count > 1:
if use_count == 2 and t.is_leaf:
accum_grad_node = torch.autograd.graph.get_gradient_edge(t).node
# Make sure that the accumulate_grad node was not lazy_init-ed by get_gradient_edge
if t._use_count() == 2:
accum_grad_node.register_prehook(error_pre_hook)
else:
raise RuntimeError(error_str)
else:
raise RuntimeError(error_str)
check_use_count(t1, "t1")
check_use_count(t2, "t2")
# Swap the types
# Note that this will fail if there are mismatched slots
swap_attr("__class__")
# Swap the dynamic attributes
swap_attr("__dict__")
# Swap the slots
for slot in t1_slots:
if hasattr(t1, slot) and hasattr(t2, slot):
swap_attr(slot)
elif hasattr(t1, slot):
setattr(t2, slot, (getattr(t1, slot)))
delattr(t1, slot)
elif hasattr(t2, slot):
setattr(t1, slot, (getattr(t2, slot)))
delattr(t2, slot)
# Swap the at::Tensor they point to
torch._C._swap_tensor_impl(t1, t2)
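# Illustrative usage sketch: swaps the payloads of two plain tensors in place
# while preserving their Python identities.
if __name__ == "__main__":
    t1, t2 = torch.zeros(2), torch.ones(3)
    swap_tensors(t1, t2)
    print(t1.shape, t2.shape)  # torch.Size([3]) torch.Size([2])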
```
|
=================================================================================================================================
SOURCE CODE FILE: _appending_byte_serializer.py
LINES: 1
SIZE: 2.96 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_appending_byte_serializer.py
ENCODING: utf-8
```py
from collections.abc import Iterable
from typing import Callable, Generic, TypeVar
T = TypeVar("T")
_ENCODING_VERSION: int = 1
__all__ = ["AppendingByteSerializer"]
#######################################
# Helper classes
#######################################
class BytesWriter:
def __init__(self, preallocate_size: int) -> None:
self._data = bytearray(preallocate_size)
def write_uint64(self, i: int) -> None:
self._data.extend(i.to_bytes(8, byteorder="big", signed=False))
def write_str(self, s: str) -> None:
payload = s.encode("utf-8")
self.write_bytes(payload)
def write_bytes(self, b: bytes) -> None:
self.write_uint64(len(b))
self._data.extend(b)
def to_bytes(self) -> bytes:
return bytes(self._data)
class BytesReader:
def __init__(self, data: bytes) -> None:
self._data = data
self._i = 0
def is_finished(self) -> bool:
return len(self._data) == self._i
def read_uint64(self) -> int:
result = int.from_bytes(
self._data[self._i : self._i + 8], byteorder="big", signed=False
)
self._i += 8
return result
def read_str(self) -> str:
return self.read_bytes().decode("utf-8")
def read_bytes(self) -> bytes:
size = self.read_uint64()
result = self._data[self._i : self._i + size]
self._i += size
return result
#######################################
# AppendingByteSerializer
#######################################
class AppendingByteSerializer(Generic[T]):
"""
Provides efficient serialization and deserialization of list of bytes
Note that this does not provide any guarantees around byte order
"""
_serialize_fn: Callable[[BytesWriter, T], None]
_writer: BytesWriter
_preallocate_size: int
def __init__(
self,
*,
serialize_fn: Callable[[BytesWriter, T], None],
preallocate_size: int = 0,
) -> None:
self._serialize_fn = serialize_fn
self._preallocate_size = preallocate_size
self.clear()
def clear(self) -> None:
self._writer = BytesWriter(preallocate_size=self._preallocate_size)
# First 8-bytes are for version
self._writer.write_uint64(_ENCODING_VERSION)
def append(self, data: T) -> None:
self._serialize_fn(self._writer, data)
def extend(self, elems: Iterable[T]) -> None:
for elem in elems:
self.append(elem)
def to_bytes(self) -> bytes:
return self._writer.to_bytes()
@staticmethod
def to_list(data: bytes, *, deserialize_fn: Callable[[BytesReader], T]) -> list[T]:
reader = BytesReader(data)
assert reader.read_uint64() == _ENCODING_VERSION
result: list[T] = []
while not reader.is_finished():
result.append(deserialize_fn(reader))
return result
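# Illustrative usage sketch: round-trip a list of strings through the
# serializer using the BytesWriter/BytesReader helpers above.
if __name__ == "__main__":
    s = AppendingByteSerializer(serialize_fn=BytesWriter.write_str)
    s.extend(["foo", "bar"])
    roundtrip = AppendingByteSerializer.to_list(
        s.to_bytes(), deserialize_fn=BytesReader.read_str
    )
    assert roundtrip == ["foo", "bar"]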
```
|
======================================================================================================================
SOURCE CODE FILE: _backport_slots.py
LINES: 1
SIZE: 4.61 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_backport_slots.py
ENCODING: utf-8
```py
# This code is backported from python 3.10 dataclasses. Once 3.10 becomes the
# minimum supported we should use dataclass(slots=True) instead.
from __future__ import annotations
import dataclasses
import itertools
from typing import TYPE_CHECKING, TypeVar
if TYPE_CHECKING:
from collections.abc import Generator
from _typeshed import DataclassInstance
__all__ = ["dataclass_slots"]
_T = TypeVar("_T", bound="DataclassInstance")
def dataclass_slots(cls: type[_T]) -> type[DataclassInstance]:
assert dataclasses.is_dataclass(cls), "Can only be used on dataclasses."
def _get_slots(cls: type[DataclassInstance]) -> Generator[str, None, None]:
slots = cls.__dict__.get("__slots__")
# `__dictoffset__` and `__weakrefoffset__` can tell us whether
# the base type has dict/weakref slots, in a way that works correctly
# for both Python classes and C extension types. Extension types
# don't use `__slots__` for slot creation
if slots is None:
slots = []
if getattr(cls, "__weakrefoffset__", -1) != 0:
slots.append("__weakref__")
if getattr(cls, "__dictrefoffset__", -1) != 0:
slots.append("__dict__")
yield from slots
elif isinstance(slots, str):
yield slots
# Slots may be any iterable, but we cannot handle an iterator
# because it will already be (partially) consumed.
elif not hasattr(cls, "__next__"):
yield from slots
else:
raise TypeError(f"Slots of '{cls.__name__}' cannot be determined")
def _add_slots(
cls: type[DataclassInstance], is_frozen: bool, weakref_slot: bool
) -> type[DataclassInstance]:
# Need to create a new class, since we can't set __slots__
# after a class has been created.
# Make sure __slots__ isn't already set.
if "__slots__" in cls.__dict__:
raise TypeError(f"{cls.__name__} already specifies __slots__")
# Create a new dict for our new class.
cls_dict = dict(cls.__dict__)
field_names = tuple(f.name for f in dataclasses.fields(cls))
# Make sure slots don't overlap with those in base classes.
inherited_slots = set(
itertools.chain.from_iterable(map(_get_slots, cls.__mro__[1:-1]))
)
# The slots for our class. Remove slots from our base classes. Add
# '__weakref__' if weakref_slot was given, unless it is already present.
cls_dict["__slots__"] = tuple(
itertools.filterfalse(
inherited_slots.__contains__,
itertools.chain(
# gh-93521: '__weakref__' also needs to be filtered out if
# already present in inherited_slots
field_names,
("__weakref__",) if weakref_slot else (),
),
),
)
for field_name in field_names:
# Remove our attributes, if present. They'll still be
# available in _MARKER.
cls_dict.pop(field_name, None)
# Remove __dict__ itself.
cls_dict.pop("__dict__", None)
# Clear existing `__weakref__` descriptor, it belongs to a previous type:
cls_dict.pop("__weakref__", None) # gh-102069
# And finally create the class.
qualname = getattr(cls, "__qualname__", None)
cls = type(cls.__name__, cls.__bases__, cls_dict)
if qualname is not None:
cls.__qualname__ = qualname
def _dataclass_getstate(self: _T) -> object:
fields = dataclasses.fields(self)
return [getattr(self, f.name) for f in fields]
def _dataclass_setstate(self: _T, state: list[object]) -> None:
fields = dataclasses.fields(self)
for field, value in zip(fields, state):
# use setattr because dataclass may be frozen
object.__setattr__(self, field.name, value)
if is_frozen:
# Need this for pickling frozen classes with slots.
if "__getstate__" not in cls_dict:
cls.__getstate__ = _dataclass_getstate # type: ignore[method-assign, assignment]
if "__setstate__" not in cls_dict:
cls.__setstate__ = _dataclass_setstate # type: ignore[attr-defined]
return cls
params = getattr(cls, dataclasses._PARAMS) # type: ignore[attr-defined]
weakref_slot = getattr(params, "weakref_slot", False)
return _add_slots(cls, params.frozen, weakref_slot)
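# Illustrative usage sketch (hypothetical Point class): roughly equivalent to
# dataclass(slots=True) on Python >= 3.10.
if __name__ == "__main__":
    @dataclass_slots
    @dataclasses.dataclass(frozen=True)
    class Point:
        x: int
        y: int
    p = Point(1, 2)
    assert Point.__slots__ == ("x", "y") and not hasattr(p, "__dict__")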
```
|
=====================================================================================================================
SOURCE CODE FILE: _config_module.py
LINES: 2
SIZE: 29.67 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_config_module.py
ENCODING: utf-8
```py
import contextlib
import copy
import hashlib
import importlib
import inspect
import io
import os
import pickle
import sys
import tokenize
import unittest
from dataclasses import dataclass
from types import FunctionType, ModuleType
from typing import (
Any,
Callable,
Generic,
NoReturn,
Optional,
TYPE_CHECKING,
TypeVar,
Union,
)
from typing_extensions import deprecated
from unittest import mock
from torch._utils_internal import justknobs_check
# Types saved/loaded in configs
CONFIG_TYPES = (int, float, bool, type(None), str, list, set, tuple, dict)
# Duplicated, because mypy needs these types statically
T = TypeVar("T", bound=Union[int, float, bool, None, str, list, set, tuple, dict])
_UNSET_SENTINEL = object()
@dataclass
class _Config(Generic[T]):
"""Represents a config with richer behaviour than just a default value.
::
i.e.
foo = Config(justknob="//foo:bar", default=False)
install_config_module(...)
    These configs must be installed with install_config_module to be used
    Precedence Order:
        alias: If set, directly use the value of the alias.
        env_name_force: If set, this environment variable has precedence over
            everything after this.
            If multiple env variables are given, the precedence order is from
            left to right.
        user_override: If a user sets a value (i.e. foo.bar=True), that
            has precedence over everything after this.
        env_name_default: If set, this environment variable will override everything
            after this.
            If multiple env variables are given, the precedence order is from
            left to right.
        justknob: If this pytorch installation supports justknobs, that will
            override defaults, but will not override the user_override precedence.
        default: This value is the lowest precedence, and will be used if nothing is
            set.
Environment Variables:
These are interpreted to be either "0" or "1" to represent true and false.
Arguments:
justknob: the name of the feature / JK. In OSS this is unused.
default: is the value to default this knob to in OSS.
alias: The alias config to read instead.
env_name_force: The environment variable, or list of, to read that is a FORCE
environment variable. I.e. it overrides everything except for alias.
env_name_default: The environment variable, or list of, to read that changes the
default behaviour. I.e. user overrides take preference.
"""
default: Union[T, object]
justknob: Optional[str] = None
env_name_default: Optional[list[str]] = None
env_name_force: Optional[list[str]] = None
alias: Optional[str] = None
def __init__(
self,
default: Union[T, object] = _UNSET_SENTINEL,
justknob: Optional[str] = None,
env_name_default: Optional[Union[str, list[str]]] = None,
env_name_force: Optional[Union[str, list[str]]] = None,
value_type: Optional[type] = None,
alias: Optional[str] = None,
):
# python 3.9 does not support kw_only on the dataclass :(.
self.default = default
self.justknob = justknob
self.env_name_default = _Config.string_or_list_of_string_to_list(
env_name_default
)
self.env_name_force = _Config.string_or_list_of_string_to_list(env_name_force)
self.value_type = value_type
self.alias = alias
if self.alias is not None:
assert (
default is _UNSET_SENTINEL
and justknob is None
and env_name_default is None
and env_name_force is None
), "if alias is set, none of {default, justknob and env var} can be set"
@staticmethod
def string_or_list_of_string_to_list(
val: Optional[Union[str, list[str]]]
) -> Optional[list[str]]:
if val is None:
return None
if isinstance(val, str):
return [val]
assert isinstance(val, list)
return val
# At runtime, we unbox the Config[T] to a T, but the typechecker cannot see this,
# so in order to allow this dynamic behavior to work correctly with
# typechecking we are going to lie to the typechecker that Config[T] returns
# a T.
if TYPE_CHECKING:
def Config(
default: Union[T, object] = _UNSET_SENTINEL,
justknob: Optional[str] = None,
env_name_default: Optional[Union[str, list[str]]] = None,
env_name_force: Optional[Union[str, list[str]]] = None,
value_type: Optional[type] = None,
alias: Optional[str] = None,
) -> T:
...
else:
def Config(
default: Union[T, object] = _UNSET_SENTINEL,
justknob: Optional[str] = None,
env_name_default: Optional[Union[str, list[str]]] = None,
env_name_force: Optional[Union[str, list[str]]] = None,
value_type: Optional[type] = None,
alias: Optional[str] = None,
) -> _Config[T]:
return _Config(
default, justknob, env_name_default, env_name_force, value_type, alias
)
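# Illustrative usage sketch (comment only; module and names are hypothetical):
# a config module typically declares plain values and Config objects at module
# level and then installs itself, e.g.:
#
#     # mylib/config.py
#     import sys
#     from torch.utils._config_module import Config, install_config_module
#     debug: bool = False
#     use_fast_path = Config(default=True, env_name_force="MYLIB_USE_FAST_PATH")
#     install_config_module(sys.modules[__name__])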
def _read_env_variable(name: str) -> Optional[Union[bool, str]]:
value = os.environ.get(name)
if value == "1":
return True
if value == "0":
return False
return value
def install_config_module(module: ModuleType) -> None:
"""
Converts a module-level config into a `ConfigModule()`.
See _config_typing.pyi for instructions on how to get the converted module to typecheck.
"""
class ConfigModuleInstance(ConfigModule):
# __annotations__ is written to by Sphinx autodoc
_bypass_keys = set({"_is_dirty", "_hash_digest", "__annotations__"})
def visit(
source: Union[ModuleType, type],
dest: Union[ModuleType, SubConfigProxy],
prefix: str,
) -> None:
"""Walk the module structure and move everything to module._config"""
if sys.version_info[:2] < (3, 10):
type_hints = getattr(source, "__annotations__", {})
else:
type_hints = inspect.get_annotations(source)
for key, value in list(source.__dict__.items()):
if (
key.startswith("__")
or isinstance(value, (ModuleType, FunctionType))
or (hasattr(value, "__module__") and value.__module__ == "typing")
# Handle from torch.utils._config_module import Config
or (isinstance(value, type) and issubclass(value, _Config))
):
continue
name = f"{prefix}{key}"
annotated_type = type_hints.get(key, None)
if isinstance(value, CONFIG_TYPES):
config[name] = _ConfigEntry(
_Config(default=value, value_type=annotated_type)
)
if dest is module:
delattr(module, key)
elif isinstance(value, _Config):
if annotated_type is not None and value.value_type is None:
value.value_type = annotated_type
config[name] = _ConfigEntry(value)
if dest is module:
delattr(module, key)
elif isinstance(value, type):
assert value.__module__ == module.__name__
# a subconfig with `class Blah:` syntax
proxy = SubConfigProxy(module, f"{name}.")
visit(value, proxy, f"{name}.")
if dest is module:
setattr(dest, key, proxy)
else:
dest.__dict__[key] = proxy
else:
raise AssertionError(f"Unhandled config {key}={value} ({type(value)})")
config: dict[str, _ConfigEntry] = {}
compile_ignored_keys = get_assignments_with_compile_ignored_comments(module)
visit(module, module, "")
module._config = config # type: ignore[attr-defined]
module._compile_ignored_keys = compile_ignored_keys # type: ignore[attr-defined]
module.__class__ = ConfigModuleInstance
module._is_dirty = True # type: ignore[attr-defined]
module._hash_digest = None # type: ignore[attr-defined]
COMPILE_IGNORED_MARKER = "@compile_ignored"
# Gets all the keys (i.e. assignments) with a @compile_ignored comment
def get_assignments_with_compile_ignored_comments(module: ModuleType) -> set[str]:
source_code = inspect.getsource(module)
assignments = set()
# Tokenize the source code to retrieve comments
tokens = tokenize.tokenize(io.BytesIO(source_code.encode("utf-8")).readline)
current_comment = "", -1
prev_name = ""
for token in tokens:
if token.type == tokenize.COMMENT:
prev_name = ""
maybe_current = token.string.strip()
if COMPILE_IGNORED_MARKER in maybe_current:
assert current_comment == (
"",
-1,
), f"unconsumed {COMPILE_IGNORED_MARKER}"
current_comment = maybe_current, token.start[0]
elif token.type == tokenize.NAME:
# Only accept the first name token, to handle if you have
# something like foo: Bar = ...
if not prev_name:
prev_name = token.string
elif token.type == tokenize.OP and token.string == "=":
# Check if the current assignment follows a comment
# with COMPILE_IGNORED_MARKER
if (
COMPILE_IGNORED_MARKER in current_comment[0]
and current_comment[1] == token.start[0] - 1
):
assignments.add(prev_name)
current_comment = "", -1 # reset
prev_name = ""
assert current_comment == ("", -1), f"unconsumed {COMPILE_IGNORED_MARKER}"
return assignments
@dataclass
class _ConfigEntry:
# The default value specified in the configuration
default: Any
# The type of the configuration value
value_type: type
# The value specified by the user when they overrode the configuration
# _UNSET_SENTINEL indicates the value is not set.
user_override: Any = _UNSET_SENTINEL
# The justknob to check for this config
justknob: Optional[str] = None
# environment variables are read at install time
env_value_force: Any = _UNSET_SENTINEL
env_value_default: Any = _UNSET_SENTINEL
    # Used to work around bad assumptions in unittest.mock.patch
# The code to blame is
# https://github.com/python/cpython/blob/94a7a4e22fb8f567090514785c69e65298acca42/Lib/unittest/mock.py#L1637
    # Essentially, mock.patch requires that if __dict__ isn't accessible
    # (which it isn't), then after delattr is called on the object, the
    # object must throw when hasattr is called. Otherwise, it doesn't call
    # setattr again.
# Technically we'll have an intermediate state of hiding the config while
# mock.patch is unpatching itself, but it calls setattr after the delete
# call so the final state is correct. It's just very unintuitive.
# upstream bug - python/cpython#126886
hide: bool = False
alias: Optional[str] = None
def __init__(self, config: _Config):
self.default = config.default
self.value_type = (
config.value_type if config.value_type is not None else type(self.default)
)
self.justknob = config.justknob
self.alias = config.alias
if config.env_name_default is not None:
for val in config.env_name_default:
if (env_value := _read_env_variable(val)) is not None:
self.env_value_default = env_value
break
if config.env_name_force is not None:
for val in config.env_name_force:
if (env_value := _read_env_variable(val)) is not None:
self.env_value_force = env_value
break
# Ensure justknobs and envvars are allowlisted types
if self.justknob is not None and self.default is not None:
assert isinstance(
self.default, bool
), f"justknobs only support booleans, {self.default} is not a boolean"
if self.value_type is not None and (
config.env_name_default is not None or config.env_name_force is not None
):
assert self.value_type in (
bool,
str,
Optional[bool],
Optional[str],
), f"envvar configs only support (optional) booleans or strings, {self.value_type} is neither"
class ConfigModule(ModuleType):
# NOTE: This should be kept in sync with _config_typing.pyi.
# The actual configuration settings. E.g., torch._dynamo.config.debug
# would live as "debug" in the key, and torch._inductor.config.triton.cudagraphs
# maps as "triton.cudagraphs". See discussion on the class for meaning of various sub items
_config: dict[str, _ConfigEntry]
_bypass_keys: set[str]
_compile_ignored_keys: set[str]
_is_dirty: bool
_hash_digest: Optional[bytes]
def __init__(self) -> None:
raise NotImplementedError(
f"use {__name__}.install_config_module(sys.modules[__name__])"
)
def __setattr__(self, name: str, value: object) -> None:
if name in self._bypass_keys:
super().__setattr__(name, value)
elif name not in self._config:
raise AttributeError(f"{self.__name__}.{name} does not exist")
elif self._config[name].alias is not None:
self._set_alias_val(self._config[name], value)
else:
self._config[name].user_override = value
self._is_dirty = True
self._config[name].hide = False
def __getattr__(self, name: str) -> Any:
try:
config = self._config[name]
if config.hide:
raise AttributeError(f"{self.__name__}.{name} does not exist")
alias_val = self._get_alias_val(config)
if alias_val is not _UNSET_SENTINEL:
return alias_val
if config.env_value_force is not _UNSET_SENTINEL:
return config.env_value_force
if config.user_override is not _UNSET_SENTINEL:
return config.user_override
if config.env_value_default is not _UNSET_SENTINEL:
return config.env_value_default
if config.justknob is not None:
# JK only supports bools and ints
return justknobs_check(name=config.justknob, default=config.default)
# Note that reference types can still be modified, so we
# copy them to user_overrides in case the user overrides
# them
if isinstance(config.default, (list, set, dict)):
config.user_override = copy.deepcopy(config.default)
return config.user_override
return config.default
except KeyError as e:
# make hasattr() work properly
raise AttributeError(f"{self.__name__}.{name} does not exist") from e
def __delattr__(self, name: str) -> None:
self._is_dirty = True
# must support delete because unittest.mock.patch deletes
# then recreate things
self._config[name].user_override = _UNSET_SENTINEL
self._config[name].hide = True
def _get_alias_module_and_name(
self, entry: _ConfigEntry
) -> Optional[tuple[ModuleType, str]]:
alias = entry.alias
if alias is None:
return None
module_name, constant_name = alias.rsplit(".", 1)
try:
module = importlib.import_module(module_name)
except ImportError as e:
            raise AttributeError(f"config alias {alias} does not exist") from e
return module, constant_name
def _get_alias_val(self, entry: _ConfigEntry) -> Any:
data = self._get_alias_module_and_name(entry)
if data is None:
return _UNSET_SENTINEL
module, constant_name = data
constant_value = getattr(module, constant_name)
return constant_value
def _set_alias_val(self, entry: _ConfigEntry, val: Any) -> None:
data = self._get_alias_module_and_name(entry)
assert data is not None
module, constant_name = data
setattr(module, constant_name, val)
def _is_default(self, name: str) -> bool:
"""
        Returns True if the config is at its default value.
        Configs overridden by the environment are not considered default.
"""
config_val = self._config[name]
# The config is not overridden by the user, and the env_value_default
# is different from the default value (meaning user has set the env to
# change the default value).
not_set_env_default = (
config_val.env_value_default is _UNSET_SENTINEL
or config_val.env_value_default == config_val.default
)
not_set_env_force = (
config_val.env_value_force is _UNSET_SENTINEL
or config_val.env_value_force == config_val.default
)
unset = config_val.user_override is _UNSET_SENTINEL
# Handle reference types specially to avoid spammy warnings
if isinstance(config_val.default, (list, set, dict)):
unset = unset or config_val.user_override == config_val.default
return unset and not_set_env_default and not_set_env_force
def _get_dict(
self,
ignored_keys: Optional[list[str]] = None,
ignored_prefixes: Optional[list[str]] = None,
skip_default: bool = False,
) -> dict[str, Any]:
"""Export a dictionary of current configuration keys and values.
        This function is designed to provide a single point which handles
accessing config options and exporting them into a dictionary.
This is used by a number of different user facing export methods
which all have slightly different semantics re: how and what to
skip.
        If a config is aliased, this function skips it.
Arguments:
ignored_keys are keys that should not be exported.
            ignored_prefixes are prefixes; any key that starts with one of
                them is not exported.
            skip_default skips any key that is still at its default value
                (i.e. not modified by the user or the environment).
"""
config: dict[str, Any] = {}
for key in self._config:
if ignored_keys and key in ignored_keys:
continue
if ignored_prefixes:
if any(key.startswith(prefix) for prefix in ignored_prefixes):
continue
if skip_default and self._is_default(key):
continue
if self._config[key].alias is not None:
continue
config[key] = copy.deepcopy(getattr(self, key))
return config
def get_type(self, config_name: str) -> type:
return self._config[config_name].value_type
def save_config(self) -> bytes:
"""Convert config to a pickled blob"""
ignored_keys = getattr(self, "_save_config_ignore", [])
return pickle.dumps(
self._get_dict(ignored_keys=ignored_keys),
protocol=2,
)
def save_config_portable(self) -> dict[str, Any]:
"""Convert config to portable format"""
prefixes = ["_"]
prefixes.extend(getattr(self, "_cache_config_ignore_prefix", []))
return self._get_dict(ignored_prefixes=prefixes)
def codegen_config(self) -> str:
"""Convert config to Python statements that replicate current config.
This does NOT include config settings that are at default values.
"""
# additional imports required
imports = set()
def get_module_name(func: Callable, add_dot: bool) -> str:
module_name = func.__module__
if module_name == "builtins":
module_name = ""
if add_dot and module_name != "":
module_name += "."
return module_name
def add_import(func: Callable) -> None:
module_name = get_module_name(func, False)
if module_name:
imports.add(module_name)
def list_of_callables_to_string(v: Union[list, set]) -> list[str]:
return [f"{get_module_name(item, True)}{item.__name__}" for item in v]
def importable_callable(v: Any) -> bool:
# functools.partial has no attributes below but is a callable
return callable(v) and hasattr(v, "__module__") and hasattr(v, "__name__")
def get_config_line(mod, k, v) -> str: # type: ignore[no-untyped-def]
"""
Return a string version of the config line.
            Handle v when v is a callable, or a list/set of callables. Add import statements for callables if necessary.
We assume that the value of a single config won't be a mix of callables and non-callables.
Example output:
import logging
import _warnings
torch._dynamo.config.reorderable_logging_functions = { _warnings.warn, logging.warn, print }
"""
if importable_callable(v):
add_import(v)
return f"{mod}.{k} = {get_module_name(v, True)}{v.__name__}"
elif isinstance(v, (list, set)) and all(
importable_callable(item) for item in v
):
for item in v:
add_import(item)
v_list = list_of_callables_to_string(v)
if isinstance(v, list):
return f"{mod}.{k} = {v_list}"
else:
return f"{mod}.{k} = {{ {', '.join(v_list)} }}"
else:
return f"{mod}.{k} = {v!r}"
lines = []
mod = self.__name__
for k, v in self._get_dict(
ignored_keys=getattr(self, "_save_config_ignore", []), skip_default=True
).items():
lines.append(get_config_line(mod, k, v))
for import_name in imports:
lines.insert(0, f"import {import_name}")
return "\n".join(lines)
def get_hash(self) -> bytes:
"""Hashes the configs that are not compile_ignored"""
if self._is_dirty or self._hash_digest is None:
dict_to_hash = self._get_dict(ignored_keys=list(self._compile_ignored_keys))
string_to_hash = repr(sorted(dict_to_hash.items()))
self._hash_digest = hashlib.md5(
string_to_hash.encode("utf-8"), usedforsecurity=False
).digest()
self._is_dirty = False
return self._hash_digest
@deprecated(
"`config.to_dict()` has been deprecated. It no longer changes the underlying config."
" use `config.get_config_copy()` instead if you just want a copy of the config, or "
"config.load_config if you need mutable access",
category=FutureWarning,
)
def to_dict(self) -> dict[str, Any]:
return self.get_config_copy()
@deprecated(
"`config.shallow_copy_dict()` has been deprecated. It no longer changes the underlying config."
" use `config.get_config_copy()` instead if you just want a copy of the config, or "
"config.load_config if you need mutable access",
category=FutureWarning,
)
def shallow_copy_dict(self) -> dict[str, Any]:
return self.get_config_copy()
def load_config(self, maybe_pickled_config: Union[bytes, dict[str, Any]]) -> None:
"""Restore from a prior call to save_config() or shallow_copy_dict()"""
if not isinstance(maybe_pickled_config, dict):
config = pickle.loads(maybe_pickled_config)
else:
config = maybe_pickled_config
for k, v in config.items():
if k in self._config:
setattr(self, k, v)
else:
from torch._dynamo.utils import warn_once
warn_once(f"key {k} with value {v} is not understood by this config")
def get_config_copy(self) -> dict[str, Any]:
return self._get_dict()
def patch(
self,
arg1: Optional[Union[str, dict[str, Any]]] = None,
arg2: Any = None,
**kwargs: dict[str, Any],
) -> "ContextDecorator":
"""
Decorator and/or context manager to make temporary changes to a config.
As a decorator:
@config.patch("name", val)
@config.patch(name1=val1, name2=val2)
            @config.patch({"name1": val1, "name2": val2})
def foo(...):
...
As a context manager:
with config.patch("name", val):
...
"""
changes: dict[str, Any]
if arg1 is not None:
if arg2 is not None:
assert isinstance(arg1, str)
# patch("key", True) syntax
changes = {arg1: arg2}
else:
assert isinstance(arg1, dict)
# patch({"key": True}) syntax
changes = arg1
assert not kwargs
else:
# patch(key=True) syntax
changes = kwargs
assert arg2 is None
assert isinstance(changes, dict), f"expected `dict` got {type(changes)}"
prior: dict[str, Any] = {}
config = self
class ConfigPatch(ContextDecorator):
def __enter__(self) -> None:
assert not prior
for key in changes.keys():
# KeyError on invalid entry
prior[key] = config.__getattr__(key)
for k, v in changes.items():
config.__setattr__(k, v)
def __exit__(self, exc_type, exc_val, exc_tb): # type: ignore[no-untyped-def]
for k, v in prior.items():
config.__setattr__(k, v)
prior.clear()
return ConfigPatch()
def _make_closure_patcher(self, **changes: dict[str, Any]) -> Any:
"""
A lower-overhead version of patch() for things on the critical path.
Usage:
# do this off the critical path
            change_fn = config._make_closure_patcher(foo=True)
...
revert = change_fn()
try:
...
finally:
revert()
"""
config = self._config
def change() -> Callable[[], None]:
prior = {k: config[k].user_override for k in changes}
for k, v in changes.items():
self._config[k].user_override = v
def revert() -> None:
for k, v in prior.items():
self._config[k].user_override = v
return revert
return change
class ContextDecorator(contextlib.ContextDecorator):
"""
Same as contextlib.ContextDecorator, but with support for
`unittest.TestCase`
"""
def __enter__(self) -> None:
raise NotImplementedError("NYI")
def __exit__(self, exc_type, exc_val, exc_tb) -> NoReturn: # type: ignore[no-untyped-def]
raise NotImplementedError("NYI")
def __call__(self, func: Callable[[Any], Any]) -> Any:
if isinstance(func, type) and issubclass(func, unittest.TestCase):
class _TestCase(func): # type: ignore[valid-type, misc]
@classmethod
def setUpClass(cls) -> None:
self.__enter__()
try:
super().setUpClass()
except Exception:
self.__exit__(None, None, None)
raise
@classmethod
def tearDownClass(cls) -> None:
try:
super().tearDownClass()
finally:
self.__exit__(None, None, None)
_TestCase.__name__ = func.__name__
_TestCase.__qualname__ = func.__qualname__
_TestCase.__module__ = func.__module__
return _TestCase
return super().__call__(func)
class SubConfigProxy:
"""
Shim to redirect to main config.
`config.triton.cudagraphs` maps to _config["triton.cudagraphs"]
"""
def __init__(self, config: object, prefix: str):
# `super().__setattr__` to bypass custom `__setattr__`
super().__setattr__("_config", config)
super().__setattr__("_prefix", prefix)
def __setattr__(self, name: str, value: object) -> None:
return self._config.__setattr__(self._prefix + name, value)
def __getattr__(self, name: str) -> Any:
return self._config.__getattr__(self._prefix + name)
def __delattr__(self, name: str) -> None:
return self._config.__delattr__(self._prefix + name)
def patch_object(obj: object, name: str, value: object) -> object:
"""
Workaround `mock.patch.object` issue with ConfigModule
"""
if isinstance(obj, ConfigModule):
return obj.patch(name, value)
return mock.patch.object(obj, name, value)
def get_tristate_env(name: str, default: Any = None) -> Optional[bool]:
value = os.environ.get(name)
if value == "1":
return True
if value == "0":
return False
return default
```
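The module above is meant to be driven the way its own `NotImplementedError` message suggests: write plain assignments in a config file, then hand that module to `install_config_module`. A minimal sketch, assuming a hypothetical `my_config.py`; the names `enable_foo` and `cache_size` are illustrative, and booleans/ints are assumed to be among the plain value types the installer accepts (as in the real PyTorch config modules that use this helper):

```py
# my_config.py -- hypothetical config module (illustrative names)
import sys

from torch.utils._config_module import install_config_module

enable_foo = False   # becomes a _ConfigEntry with default False
cache_size = 128     # becomes a _ConfigEntry with default 128

# Typically the last statement: rewrites this module into a ConfigModule.
install_config_module(sys.modules[__name__])
```

```py
# elsewhere -- reading, overriding, and temporarily patching it
import my_config

print(my_config.enable_foo)        # False (the default)
my_config.cache_size = 256         # persistent user override

with my_config.patch(enable_foo=True):
    assert my_config.enable_foo    # temporary override inside the block
assert not my_config.enable_foo    # restored on exit
```

Reads resolve through `__getattr__` in the order shown above: alias, forced env value, user override, default env value, justknob, then the declared default.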
|
=====================================================================================================================
SOURCE CODE FILE: _content_store.py
LINES: 1
SIZE: 9.24 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_content_store.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# This module provides a FAST (on GPU) content addressable store for storages
# (and tensors on top of them) with VERY WEAK portability guarantees (e.g.,
# don't expect CPU/CUDA to address to the same hash, don't expect it to be
# portable across devices) that is NOT cryptographically secure. In return,
# we are able to hash 40G of tensor data on GPU in less than a second,
# compared to running SHA-1 on the CPU, which would take a minute or so. The primary
# use case is for efficiently snapshotting intermediate tensor data for
# offline debugging, but it's been put in this module in case you think of
# another use case for it. The hash function could be replaced with a
# straight reimplementation of SHA-1, which would give us much stronger
# portability guarantees.
#
# WARNING: THERE IS NO BC/FC GUARANTEE FOR THIS FORMAT! If you need to format
# shift the result, consider packing it into a single torch.save object
# with traditional view sharing.
#
# Because of the weak portability guarantees, you can only write to the
# content store from a single process; we don't provide any capability
# of "reopening" a content store to add more things to it. But we don't
# assume that you can keep all of the tensors you want to add to the store
# in memory at once, because you probably can't! Nor do we assume that
# you know a priori whether or not two storages can be deduplicated.
#
# Note: only storages are content-addressed; tensors are name addressed
#
# Note: our padding strategy means that [1, 0] and [1] int16 tensors would
# map to the same (padded) storage. We think this will be immaterial for most
# users.
import ctypes
import functools
import hashlib
import os.path
import struct
from collections import defaultdict
from typing import Optional
import torch
import torch._prims as prims
import torch._utils
import torch.nn.functional as F
from torch.multiprocessing.reductions import StorageWeakRef
def lazy_compile(**compile_kwargs):
"""Lazily wrap a function with torch.compile on the first call
This avoids eagerly importing dynamo.
"""
def decorate_fn(fn):
@functools.wraps(fn)
def compile_hook(*args, **kwargs):
compiled_fn = torch.compile(fn, **compile_kwargs)
globals()[fn.__name__] = functools.wraps(fn)(compiled_fn)
return compiled_fn(*args, **kwargs)
return compile_hook
return decorate_fn
# Use of torch.compile is mandatory for (1) good memory usage
# and (2) xor_sum implementation. This is our first instance of
# using PT2 to implement a kernel in PyTorch; if we get AOT capabilities
# it would be good to apply it here.
@lazy_compile(dynamic=True)
def hash_storage_kernel(x):
# The randint calls are carefully written to hit things we
# have lowerings for in inductor. Lack of unsigned 32-bit integer
# is a pain.
a = torch.randint(
-(2**31), 2**31, x.shape, device=x.device, dtype=torch.int32
).abs()
a = ((a % (2**31 - 1)) + 1).long()
b = (
torch.randint(-(2**31), 2**31, x.shape, device=x.device, dtype=torch.int32)
.abs()
.long()
)
# This is a standard shift-multiply universal hash family
# plus xor sum hash, using Philox to generate random numbers.
# Our Philox RNG is not deterministic across devices so
# don't use this for stable hashing.
#
    # This assumes a fixed length, so you're also obligated to bucket
    # by the length of the tensor as well
return prims.xor_sum((a * x + b).int(), [0])
# Returns a hex digest of the data in the storage. Guaranteed to be
# SHA-1 if stable_hash=True, otherwise it will be consistent for a single
# process run but not necessarily across processes.
def hash_storage(storage: torch.UntypedStorage, *, stable_hash: bool = False) -> str:
import torch._dynamo
from torch._dynamo.utils import is_compile_supported
device_type = storage.device.type
if stable_hash or not is_compile_supported(device_type):
cpu_storage = storage.cpu()
# TODO: make storage support buffer protocol so this isn't
# necessary
buf = (ctypes.c_byte * cpu_storage.nbytes()).from_address(
cpu_storage.data_ptr()
)
sha1 = hashlib.sha1(usedforsecurity=False)
sha1.update(buf)
return sha1.hexdigest()
# TODO: factor this into a random utility
if device_type == "cpu":
generator = torch._C.default_generator
elif device_type == "cuda":
generator = torch.cuda.default_generators[storage.device.index]
elif device_type == "mps":
generator = torch.mps._get_default_mps_generator()
elif device_type == "xpu":
generator = torch.xpu.default_generators[storage.device.index]
else:
raise AssertionError(f"unhandled device type {device_type}")
state = generator.get_state()
try:
generator.manual_seed(0)
x = torch.empty(0, dtype=torch.uint8, device=storage.device).set_(storage) # type: ignore[call-overload]
# The dtype-casting view cannot be compiled, and so the
# padding/reshaping also needs to be done externally even
# though it could be profitably fused
pad = -x.numel() % 4
if pad > 0:
x = F.pad(x, (0, pad), "constant", 0)
x = x.view(torch.int32)
# We run the 32-bit hash five times with differing parameters to
# reduce chance of collision
ITER = 5
cs = [hash_storage_kernel(x).item() for _ in range(ITER)]
return struct.pack(">" + "i" * ITER, *cs).hex()
finally:
generator.set_state(state)
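# Note: both branches of hash_storage return a 40-character hex string -- a
# SHA-1 digest is 20 bytes, and the compiled path packs ITER (5) big-endian
# 32-bit words, which is also 20 bytes.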
class ContentStoreWriter:
# Structure:
# storages/
# 00/
# 0000..00
# tensors/
# name
def __init__(self, loc: str, stable_hash: bool = False) -> None:
self.loc: str = loc
self.seen_storage_hashes: set[str] = set()
self.stable_hash = stable_hash
# TODO: offer some sort of non-blocking API to speed things up
def write_storage(self, storage: torch.UntypedStorage) -> str:
h = hash_storage(storage, stable_hash=self.stable_hash)
if h in self.seen_storage_hashes:
return h
# TODO: consider not using torch.save for this; we don't actually
# need any metadata for the storage
subfolder = os.path.join(self.loc, "storages")
os.makedirs(subfolder, exist_ok=True)
target = os.path.join(subfolder, h)
if os.path.exists(target):
return h
torch.save(storage, target)
self.seen_storage_hashes.add(h)
return h
def compute_tensor_metadata(self, t: torch.Tensor, h=None):
if h is None:
h = hash_storage(t.untyped_storage(), stable_hash=self.stable_hash)
return (
t.dtype,
h,
t.storage_offset(),
tuple(t.shape),
t.stride(),
torch._utils.get_tensor_metadata(t),
)
def write_tensor(self, name: str, t: torch.Tensor) -> None:
storage = t.untyped_storage()
h = self.write_storage(storage)
# TODO: Support more advanced snapshotting of requires_grad/grad/etc
d, f = os.path.split(name)
payload = self.compute_tensor_metadata(t, h=h)
subfolder = os.path.join(self.loc, "tensors", d)
os.makedirs(subfolder, exist_ok=True)
torch.save(payload, os.path.join(subfolder, f))
class ContentStoreReader:
def __init__(self, loc: str, *, cache=True) -> None:
self.loc = loc
self.storage_cache: Optional[
dict[Optional[torch.device], dict[str, StorageWeakRef]]
] = None
if cache:
self.storage_cache = defaultdict(dict)
def read_storage(self, h: str, *, device=None) -> torch.UntypedStorage:
if device is not None:
device = torch.device(device)
ws = (
self.storage_cache[device].get(h)
if self.storage_cache is not None
else None
)
s: Optional[torch.UntypedStorage]
if ws is not None:
s = torch.UntypedStorage._new_with_weak_ptr(ws.cdata)
if s is not None:
return s
s = torch.load(
os.path.join(self.loc, "storages", h),
weights_only=True,
map_location=device,
)._untyped_storage
assert s is not None
if self.storage_cache is not None:
self.storage_cache[device][h] = StorageWeakRef(s)
return s
def read_tensor_metadata(self, name: str):
fn = os.path.join(self.loc, "tensors", name)
if not os.path.exists(fn):
raise FileNotFoundError(fn)
return torch.load(fn, weights_only=True)
def read_tensor(self, name: str, *, device=None) -> torch.Tensor:
dtype, h, storage_offset, size, stride, metadata = self.read_tensor_metadata(
name
)
storage = self.read_storage(h, device=device)
t = torch.tensor([], dtype=dtype, device=storage.device)
t.set_(storage, storage_offset, size, stride)
torch._utils.set_tensor_metadata(t, metadata)
return t
```
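A short round-trip sketch of the writer/reader pair above. The store directory and tensor names are arbitrary, and `stable_hash=True` selects the SHA-1 path so the example does not depend on a compile-capable device:

```py
import tempfile

import torch
from torch.utils._content_store import ContentStoreReader, ContentStoreWriter

loc = tempfile.mkdtemp()

# Write: storages are deduplicated by hash, tensors are saved under their names.
writer = ContentStoreWriter(loc, stable_hash=True)
x = torch.arange(16, dtype=torch.float32)
y = x.view(4, 4)                      # shares x's storage -> only one storage blob
writer.write_tensor("debug/x", x)
writer.write_tensor("debug/y", y)

# Read back; pass device=... to map the storage onto another device.
reader = ContentStoreReader(loc)
assert torch.equal(reader.read_tensor("debug/x"), x)
assert torch.equal(reader.read_tensor("debug/y"), y)
```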
|
==================================================================================================================
SOURCE CODE FILE: _contextlib.py
LINES: 1
SIZE: 6.03 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_contextlib.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# Extra utilities for working with context managers that should have been
# in the standard library but are not
import functools
import inspect
import warnings
import sys
from typing import Any, Callable, TypeVar, cast
# Used for annotating the decorator usage of _DecoratorContextManager (e.g.,
# 'no_grad' and 'enable_grad').
# See https://mypy.readthedocs.io/en/latest/generics.html#declaring-decorators
FuncType = Callable[..., Any]
F = TypeVar('F', bound=FuncType)
def _wrap_generator(ctx_factory, func):
"""
Wrap each generator invocation with the context manager factory.
The input should be a function that returns a context manager,
not a context manager itself, to handle one-shot context managers.
"""
@functools.wraps(func)
def generator_context(*args, **kwargs):
gen = func(*args, **kwargs)
# Generators are suspended and unsuspended at `yield`, hence we
# make sure the grad mode is properly set every time the execution
# flow returns into the wrapped generator and restored when it
# returns through our `yield` to our caller (see PR #49017).
try:
# Issuing `None` to a generator fires it up
with ctx_factory():
response = gen.send(None)
while True:
try:
# Forward the response to our caller and get its next request
request = yield response
except GeneratorExit:
# Inform the still active generator about its imminent closure
with ctx_factory():
gen.close()
raise
except BaseException:
# Propagate the exception thrown at us by the caller
with ctx_factory():
response = gen.throw(*sys.exc_info())
else:
# Pass the last request to the generator and get its response
with ctx_factory():
response = gen.send(request)
# We let the exceptions raised above by the generator's `.throw` or
# `.send` methods bubble up to our caller, except for StopIteration
except StopIteration as e:
# The generator informed us that it is done: take whatever its
# returned value (if any) was and indicate that we're done too
# by returning it (see docs for python's return-statement).
return e.value
return generator_context
def context_decorator(ctx, func):
"""
Like contextlib.ContextDecorator.
But with the following differences:
1. Is done by wrapping, rather than inheritance, so it works with context
managers that are implemented from C and thus cannot easily inherit from
Python classes
2. Wraps generators in the intuitive way (c.f. https://bugs.python.org/issue37743)
3. Errors out if you try to wrap a class, because it is ambiguous whether
or not you intended to wrap only the constructor
The input argument can either be a context manager (in which case it must
be a multi-shot context manager that can be directly invoked multiple times)
or a callable that produces a context manager.
"""
assert not (callable(ctx) and hasattr(ctx, '__enter__')), (
f"Passed in {ctx} is both callable and also a valid context manager "
"(has __enter__), making it ambiguous which interface to use. If you "
"intended to pass a context manager factory, rewrite your call as "
"context_decorator(lambda: ctx()); if you intended to pass a context "
"manager directly, rewrite your call as context_decorator(lambda: ctx)"
)
if not callable(ctx):
def ctx_factory():
return ctx
else:
ctx_factory = ctx
if inspect.isclass(func):
raise RuntimeError(
"Cannot decorate classes; it is ambiguous whether or not only the "
"constructor or all methods should have the context manager applied; "
"additionally, decorating a class at definition-site will prevent "
"use of the identifier as a conventional type. "
"To specify which methods to decorate, decorate each of them "
"individually."
)
if inspect.isgeneratorfunction(func):
return _wrap_generator(ctx_factory, func)
@functools.wraps(func)
def decorate_context(*args, **kwargs):
with ctx_factory():
return func(*args, **kwargs)
return decorate_context
class _DecoratorContextManager:
"""Allow a context manager to be used as a decorator."""
def __call__(self, orig_func: F) -> F:
if inspect.isclass(orig_func):
warnings.warn(
"Decorating classes is deprecated and will be disabled in "
"future versions. You should only decorate functions or methods. "
"To preserve the current behavior of class decoration, you can "
"directly decorate the `__init__` method and nothing else.",
FutureWarning,
stacklevel=2,
)
func = cast(F, lambda *args, **kwargs: orig_func(*args, **kwargs))
else:
func = orig_func
return cast(F, context_decorator(self.clone, func))
def __enter__(self) -> None:
raise NotImplementedError
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
raise NotImplementedError
def clone(self):
        # override this method if your child class takes __init__ parameters
return self.__class__()
class _NoParamDecoratorContextManager(_DecoratorContextManager):
"""Allow a context manager to be used as a decorator without parentheses."""
def __new__(cls, orig_func=None):
if orig_func is None:
return super().__new__(cls)
return cls()(orig_func)
```
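A small sketch of `context_decorator`. We pass a context-manager *factory* (the `@contextlib.contextmanager`-decorated function itself, not an instance) because each instance it produces is one-shot; `announce`, `add`, and `count` are made-up names:

```py
import contextlib

from torch.utils._contextlib import context_decorator


@contextlib.contextmanager
def announce():
    print("enter")
    try:
        yield
    finally:
        print("exit")


def add(a, b):
    return a + b


def count(n):
    for i in range(n):
        yield i


loud_add = context_decorator(announce, add)      # plain function: wrapped call
loud_count = context_decorator(announce, count)  # generator: wrapped per resumption

print(loud_add(1, 2))       # "enter"/"exit" printed around the call, then 3
print(list(loud_count(3)))  # the context is re-entered each time the generator runs
```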
|
=========================================================================================================================
SOURCE CODE FILE: _cpp_embed_headers.py
LINES: 2
SIZE: 1.77 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_cpp_embed_headers.py
ENCODING: utf-8
```py
from collections.abc import Sequence
from pathlib import Path
from re import match as _match
from typing import Optional, Union
def read_file(fname: Union[Path, str]) -> list[str]:
with open(fname, encoding="utf-8") as f:
return f.readlines()
def _embed_headers(
content: list[str], include_dirs: list[Path], processed_files: set[str]
) -> str:
for line_idx, cur_line in enumerate(content):
# Eliminate warning: `#pragma once in main file`
if cur_line.startswith("#pragma once"):
content[line_idx] = ""
continue
m = _match('^\\s*#include\\s*[<"]([^>"]+)[>"]', cur_line)
if m is None:
continue
for include_dir in include_dirs:
path = include_dir / m[1]
if not path.exists():
continue
if str(path) in processed_files:
content[line_idx] = ""
continue
processed_files.add(str(path))
content[line_idx] = _embed_headers(
read_file(path), include_dirs, processed_files
)
break
return "".join(content)
def embed_headers(
fname: str, include_dirs: Optional[Union[Sequence[str], Sequence[Path], str]] = None
) -> str:
if include_dirs is None:
include_dirs = [Path(__file__).parent.parent.parent]
elif isinstance(include_dirs, str):
include_dirs = [Path(include_dirs)]
else:
include_dirs = [Path(x) for x in include_dirs]
return _embed_headers(read_file(fname), include_dirs, {fname})
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
        print(f"Usage:\n {sys.argv[0]} filename")
sys.exit(1)
print(embed_headers(sys.argv[1]))
```
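A hedged usage sketch with throwaway files: `embed_headers` inlines any include it can resolve against `include_dirs` (stripping its `#pragma once`) and leaves unresolvable ones such as `<vector>` untouched:

```py
from pathlib import Path
from tempfile import TemporaryDirectory

from torch.utils._cpp_embed_headers import embed_headers

with TemporaryDirectory() as d:
    root = Path(d)
    (root / "helpers.h").write_text(
        "#pragma once\nint helper(int x) { return x + 1; }\n"
    )
    (root / "kernel.cpp").write_text(
        '#include "helpers.h"\n#include <vector>\nint run(int x) { return helper(x); }\n'
    )
    # helpers.h gets inlined; <vector> is not found under include_dirs, so that
    # line is left as-is.
    print(embed_headers(str(root / "kernel.cpp"), include_dirs=[root]))
```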
|
===============================================================================================================================
SOURCE CODE FILE: _cpp_extension_versioner.py
LINES: 1
SIZE: 2.12 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_cpp_extension_versioner.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import collections
Entry = collections.namedtuple('Entry', 'version, hash')
def update_hash(seed, value):
# Good old boost::hash_combine
# https://www.boost.org/doc/libs/1_35_0/doc/html/boost/hash_combine_id241013.html
return seed ^ (hash(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2))
def hash_source_files(hash_value, source_files):
for filename in source_files:
with open(filename, 'rb') as file:
hash_value = update_hash(hash_value, file.read())
return hash_value
def hash_build_arguments(hash_value, build_arguments):
for group in build_arguments:
if group:
for argument in group:
hash_value = update_hash(hash_value, argument)
return hash_value
class ExtensionVersioner:
def __init__(self):
self.entries = {}
def get_version(self, name):
entry = self.entries.get(name)
return None if entry is None else entry.version
def bump_version_if_changed(self,
name,
source_files,
build_arguments,
build_directory,
with_cuda,
with_sycl,
is_python_module,
is_standalone):
hash_value = 0
hash_value = hash_source_files(hash_value, source_files)
hash_value = hash_build_arguments(hash_value, build_arguments)
hash_value = update_hash(hash_value, build_directory)
hash_value = update_hash(hash_value, with_cuda)
hash_value = update_hash(hash_value, with_sycl)
hash_value = update_hash(hash_value, is_python_module)
hash_value = update_hash(hash_value, is_standalone)
entry = self.entries.get(name)
if entry is None:
self.entries[name] = entry = Entry(0, hash_value)
elif hash_value != entry.hash:
self.entries[name] = entry = Entry(entry.version + 1, hash_value)
return entry.version
```
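A quick sketch of the versioner above: the returned version bumps only when the combined hash of the sources and build arguments changes. The file and extension names are made up:

```py
import os
import tempfile

from torch.utils._cpp_extension_versioner import ExtensionVersioner

src = os.path.join(tempfile.mkdtemp(), "op.cpp")
with open(src, "w") as f:
    f.write("int op() { return 1; }\n")

versioner = ExtensionVersioner()
kwargs = dict(
    source_files=[src],
    build_arguments=(["-O2"], None),   # groups, as hash_build_arguments expects
    build_directory="/tmp/build",
    with_cuda=False,
    with_sycl=False,
    is_python_module=True,
    is_standalone=False,
)
print(versioner.bump_version_if_changed("my_ext", **kwargs))  # 0 (first build)
print(versioner.bump_version_if_changed("my_ext", **kwargs))  # 0 (nothing changed)

with open(src, "w") as f:
    f.write("int op() { return 2; }\n")                       # source edited...
print(versioner.bump_version_if_changed("my_ext", **kwargs))  # 1 (...version bumps)
```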
|
==================================================================================================================
SOURCE CODE FILE: _cxx_pytree.py
LINES: 1
SIZE: 37.78 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_cxx_pytree.py
ENCODING: utf-8
```py
"""
Contains utility functions for working with nested python data structures.
A *pytree* is a Python nested data structure. It is a tree in the sense that
nodes are Python collections (e.g., list, tuple, dict) and the leaves are
Python values. Furthermore, a pytree should not contain reference cycles.
pytrees are useful for working with nested collections of Tensors. For example,
one can use `tree_map` to map a function over all Tensors inside some nested
collection of Tensors and `tree_leaves` to get a flat list of all Tensors
inside some nested collection. pytrees are helpful for implementing nested
collection support for PyTorch APIs.
"""
import functools
import sys
import types
from collections.abc import Iterable
from typing import Any, Callable, Optional, overload, TypeVar, Union
from typing_extensions import deprecated, TypeIs
import optree
from torch._vendor.packaging.version import Version
# Keep the version in sync with torch.utils._cxx_pytree!
if Version(optree.__version__) < Version("0.13.0"): # type: ignore[attr-defined]
raise ImportError(
"torch.utils._cxx_pytree depends on optree, which is an optional dependency "
"of PyTorch. To use it, please upgrade your optree package to >= 0.13.0"
)
del Version
from optree import PyTreeSpec as TreeSpec # direct import for type annotations
import torch.utils._pytree as python_pytree
from torch.utils._pytree import KeyEntry as KeyEntry
__all__ = [
"PyTree",
"Context",
"FlattenFunc",
"UnflattenFunc",
"DumpableContext",
"ToDumpableContextFn",
"FromDumpableContextFn",
"TreeSpec",
"LeafSpec",
"keystr",
"key_get",
"register_pytree_node",
"tree_flatten",
"tree_flatten_with_path",
"tree_unflatten",
"tree_iter",
"tree_leaves",
"tree_leaves_with_path",
"tree_structure",
"tree_map",
"tree_map_with_path",
"tree_map_",
"tree_map_only",
"tree_map_only_",
"tree_all",
"tree_any",
"tree_all_only",
"tree_any_only",
"treespec_dumps",
"treespec_loads",
"treespec_pprint",
]
__TORCH_DICT_SESSION = optree.dict_insertion_ordered(True, namespace="torch")
__TORCH_DICT_SESSION.__enter__() # enable globally and permanently
T = TypeVar("T")
S = TypeVar("S")
U = TypeVar("U")
R = TypeVar("R")
Context = Any
PyTree = Any
FlattenFunc = Callable[[PyTree], tuple[list[Any], Context]]
UnflattenFunc = Callable[[Iterable[Any], Context], PyTree]
OpTreeUnflattenFunc = Callable[[Context, Iterable[Any]], PyTree]
DumpableContext = Any # Any json dumpable text
ToDumpableContextFn = Callable[[Context], DumpableContext]
FromDumpableContextFn = Callable[[DumpableContext], Context]
KeyPath = tuple[KeyEntry, ...]
FlattenWithKeysFunc = Callable[[PyTree], tuple[list[tuple[KeyEntry, Any]], Any]]
def _reverse_args(func: UnflattenFunc) -> OpTreeUnflattenFunc:
@functools.wraps(func)
def wrapped(*args: Any, **kwargs: Any) -> Any:
return func(*reversed(args), **kwargs)
return wrapped
def register_pytree_node(
cls: type[Any],
flatten_fn: FlattenFunc,
unflatten_fn: UnflattenFunc,
*,
serialized_type_name: Optional[str] = None,
to_dumpable_context: Optional[ToDumpableContextFn] = None,
from_dumpable_context: Optional[FromDumpableContextFn] = None,
flatten_with_keys_fn: Optional[FlattenWithKeysFunc] = None,
) -> None:
"""Register a container-like type as pytree node.
Args:
cls (type): A Python type to treat as an internal pytree node.
flatten_fn (callable): A function to be used during flattening, taking an instance of
``cls`` and returning a pair, with (1) an iterable for the children to be flattened
recursively, and (2) some hashable auxiliary data to be stored in the treespec and to be
passed to the ``unflatten_fn``.
unflatten_fn (callable): A function taking two arguments: the auxiliary data that was
returned by ``flatten_fn`` and stored in the treespec, and the unflattened children.
The function should return an instance of ``cls``.
serialized_type_name (str, optional): A keyword argument used to specify the fully
qualified name used when serializing the tree spec.
to_dumpable_context (callable, optional): An optional keyword argument to custom specify how
to convert the context of the pytree to a custom json dumpable representation. This is
used for json serialization, which is being used in :mod:`torch.export` right now.
from_dumpable_context (callable, optional): An optional keyword argument to custom specify
how to convert the custom json dumpable representation of the context back to the
original context. This is used for json deserialization, which is being used in
:mod:`torch.export` right now.
Example::
>>> # xdoctest: +SKIP
        >>> # Register a Python type with lambda functions
>>> register_pytree_node(
... set,
... lambda s: (sorted(s), None, None),
... lambda children, _: set(children),
... )
"""
if flatten_with_keys_fn is not None:
raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.")
_private_register_pytree_node(
cls,
flatten_fn,
unflatten_fn,
serialized_type_name=serialized_type_name,
to_dumpable_context=to_dumpable_context,
from_dumpable_context=from_dumpable_context,
)
python_pytree._private_register_pytree_node(
cls,
flatten_fn,
unflatten_fn,
serialized_type_name=serialized_type_name,
to_dumpable_context=to_dumpable_context,
from_dumpable_context=from_dumpable_context,
)
@deprecated(
"`torch.utils._cxx_pytree._register_pytree_node` is deprecated. "
"Please use `torch.utils._cxx_pytree.register_pytree_node` instead.",
category=FutureWarning,
)
def _register_pytree_node(
cls: type[Any],
flatten_fn: FlattenFunc,
unflatten_fn: UnflattenFunc,
*,
serialized_type_name: Optional[str] = None,
to_dumpable_context: Optional[ToDumpableContextFn] = None,
from_dumpable_context: Optional[FromDumpableContextFn] = None,
) -> None:
"""Register a container-like type as pytree node for the C++ pytree only.
The ``namespace`` argument is used to avoid collisions that occur when different libraries
register the same Python type with different behaviors. It is recommended to add a unique prefix
to the namespace to avoid conflicts with other libraries. Namespaces can also be used to specify
the same class in different namespaces for different use cases.
.. warning::
For safety reasons, a ``namespace`` must be specified while registering a custom type. It is
used to isolate the behavior of flattening and unflattening a pytree node type. This is to
prevent accidental collisions between different libraries that may register the same type.
Args:
cls (type): A Python type to treat as an internal pytree node.
flatten_fn (callable): A function to be used during flattening, taking an instance of
``cls`` and returning a pair, with (1) an iterable for the children to be flattened
recursively, and (2) some hashable auxiliary data to be stored in the treespec and to be
passed to the ``unflatten_fn``.
unflatten_fn (callable): A function taking two arguments: the auxiliary data that was
returned by ``flatten_fn`` and stored in the treespec, and the unflattened children.
The function should return an instance of ``cls``.
serialized_type_name (str, optional): A keyword argument used to specify the fully
qualified name used when serializing the tree spec.
to_dumpable_context (callable, optional): An optional keyword argument to custom specify how
to convert the context of the pytree to a custom json dumpable representation. This is
used for json serialization, which is being used in :mod:`torch.export` right now.
from_dumpable_context (callable, optional): An optional keyword argument to custom specify
how to convert the custom json dumpable representation of the context back to the
original context. This is used for json deserialization, which is being used in
:mod:`torch.export` right now.
"""
_private_register_pytree_node(
cls,
flatten_fn,
unflatten_fn,
serialized_type_name=serialized_type_name,
to_dumpable_context=to_dumpable_context,
from_dumpable_context=from_dumpable_context,
)
def _private_register_pytree_node(
cls: type[Any],
flatten_fn: FlattenFunc,
unflatten_fn: UnflattenFunc,
*,
serialized_type_name: Optional[str] = None,
to_dumpable_context: Optional[ToDumpableContextFn] = None,
from_dumpable_context: Optional[FromDumpableContextFn] = None,
) -> None:
"""This is an internal function that is used to register a pytree node type
for the C++ pytree only. End-users should use :func:`register_pytree_node`
instead.
"""
# TODO(XuehaiPan): remove this condition when we make Python pytree out-of-box support
# PyStructSequence types
if not optree.is_structseq_class(cls):
optree.register_pytree_node(
cls,
flatten_fn,
_reverse_args(unflatten_fn),
namespace="torch",
)
def _is_pytreespec_instance(obj: Any, /) -> TypeIs[TreeSpec]:
return isinstance(obj, TreeSpec)
def tree_is_leaf(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
"""Check if a pytree is a leaf.
>>> tree_is_leaf(1)
True
>>> tree_is_leaf(None)
True
>>> tree_is_leaf([1, 2, 3])
False
>>> tree_is_leaf((1, 2, 3), is_leaf=lambda x: isinstance(x, tuple))
True
>>> tree_is_leaf({'a': 1, 'b': 2, 'c': 3})
False
>>> tree_is_leaf({'a': 1, 'b': 2, 'c': None})
False
Args:
tree (pytree): A pytree to check if it is a leaf node.
is_leaf (callable, optional): An extra leaf predicate function that will be called at each
flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
            as a leaf. Otherwise, the default pytree registry will be used to determine whether a
            node is a leaf. If the function is not specified, the default pytree registry will be used.
Returns:
A boolean indicating if the pytree is a leaf node.
"""
return optree.tree_is_leaf(
tree,
is_leaf=is_leaf,
none_is_leaf=True,
namespace="torch",
)
def tree_flatten(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> tuple[list[Any], TreeSpec]:
"""Flatten a pytree.
See also :func:`tree_unflatten`.
The flattening order (i.e., the order of elements in the output list) is deterministic,
corresponding to a left-to-right depth-first tree traversal.
>>> tree = {"b": (2, [3, 4]), "a": 1, "c": None, "d": 5}
>>> tree_flatten(tree)
([2, 3, 4, 1, None, 5], PyTreeSpec({'b': (*, [*, *]), 'a': *, 'c': *, 'd': *}, NoneIsLeaf, namespace='torch'))
>>> tree_flatten(1)
([1], PyTreeSpec(*, NoneIsLeaf, namespace='torch'))
>>> tree_flatten(None)
([None], PyTreeSpec(*, NoneIsLeaf, namespace='torch'))
>>> from collections import OrderedDict
>>> tree = OrderedDict([("b", (2, [3, 4])), ("a", 1), ("c", None), ("d", 5)])
>>> tree_flatten(tree)
([2, 3, 4, 1, None, 5], PyTreeSpec(OrderedDict({'b': (*, [*, *]), 'a': *, 'c': *, 'd': *}), NoneIsLeaf, namespace='torch'))
Args:
tree (pytree): A pytree to flatten.
is_leaf (callable, optional): An extra leaf predicate function that will be called at each
flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
            as a leaf. Otherwise, the default pytree registry will be used to determine whether a
            node is a leaf. If the function is not specified, the default pytree registry will be used.
Returns:
A pair ``(leaves, treespec)`` where the first element is a list of leaf values and the
second element is a treespec representing the structure of the pytree.
"""
return optree.tree_flatten( # type: ignore[return-value]
tree,
is_leaf=is_leaf,
none_is_leaf=True,
namespace="torch",
)
def tree_unflatten(leaves: Iterable[Any], treespec: TreeSpec) -> PyTree:
"""Reconstruct a pytree from the treespec and the leaves.
The inverse of :func:`tree_flatten`.
>>> tree = {"b": (2, [3, 4]), "a": 1, "c": None, "d": 5}
>>> leaves, treespec = tree_flatten(tree)
>>> tree == tree_unflatten(leaves, treespec)
True
Args:
leaves (iterable): The list of leaves to use for reconstruction. The list must match the
number of leaves of the treespec.
treespec (TreeSpec): The treespec to reconstruct.
Returns:
The reconstructed pytree, containing the ``leaves`` placed in the structure described by
``treespec``.
"""
if not _is_pytreespec_instance(treespec):
raise TypeError(
f"tree_unflatten(leaves, treespec): Expected `treespec` to be instance of "
f"PyTreeSpec but got item of type {type(treespec)}."
)
return optree.tree_unflatten(treespec, leaves) # type: ignore[arg-type]
def tree_iter(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> Iterable[Any]:
"""Get an iterator over the leaves of a pytree.
See also :func:`tree_flatten`.
>>> tree = {"b": (2, [3, 4]), "a": 1, "c": None, "d": 5}
>>> list(tree_iter(tree))
[2, 3, 4, 1, None, 5]
>>> list(tree_iter(1))
[1]
>>> list(tree_iter(None))
[None]
Args:
tree (pytree): A pytree to flatten.
is_leaf (callable, optional): An extra leaf predicate function that will be called at each
flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
            as a leaf. Otherwise, the default pytree registry will be used to determine whether a
            node is a leaf. If the function is not specified, the default pytree registry will be used.
Returns:
An iterator over the leaf values.
"""
return optree.tree_iter(
tree,
is_leaf=is_leaf,
none_is_leaf=True,
namespace="torch",
)
def tree_leaves(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> list[Any]:
"""Get the leaves of a pytree.
See also :func:`tree_flatten`.
>>> tree = {"b": (2, [3, 4]), "a": 1, "c": None, "d": 5}
>>> tree_leaves(tree)
[2, 3, 4, 1, None, 5]
>>> tree_leaves(1)
[1]
>>> tree_leaves(None)
[None]
Args:
tree (pytree): A pytree to flatten.
is_leaf (callable, optional): An extra leaf predicate function that will be called at each
flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
            as a leaf. Otherwise, the default pytree registry will be used to determine whether a
            node is a leaf. If the function is not specified, the default pytree registry will be used.
Returns:
A list of leaf values.
"""
return optree.tree_leaves(
tree,
is_leaf=is_leaf,
none_is_leaf=True,
namespace="torch",
)
def tree_structure(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> TreeSpec:
"""Get the treespec for a pytree.
See also :func:`tree_flatten`.
>>> tree = {"b": (2, [3, 4]), "a": 1, "c": None, "d": 5}
>>> tree_structure(tree)
PyTreeSpec({'b': (*, [*, *]), 'a': *, 'c': *, 'd': *}, NoneIsLeaf, namespace='torch')
>>> tree_structure(1)
PyTreeSpec(*, NoneIsLeaf, namespace='torch')
>>> tree_structure(None)
PyTreeSpec(*, NoneIsLeaf, namespace='torch')
Args:
tree (pytree): A pytree to flatten.
is_leaf (callable, optional): An extra leaf predicate function that will be called at each
flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
            as a leaf. Otherwise, the default pytree registry will be used to determine whether a
            node is a leaf. If the function is not specified, the default pytree registry will be used.
Returns:
A treespec object representing the structure of the pytree.
"""
return optree.tree_structure( # type: ignore[return-value]
tree,
is_leaf=is_leaf,
none_is_leaf=True,
namespace="torch",
)
def tree_map(
func: Callable[..., Any],
tree: PyTree,
*rests: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
"""Map a multi-input function over pytree args to produce a new pytree.
See also :func:`tree_map_`.
>>> tree_map(lambda x: x + 1, {"x": 7, "y": (42, 64)})
{'x': 8, 'y': (43, 65)}
>>> tree_map(lambda x: x is None, {"x": 7, "y": (42, 64), "z": None})
{'x': False, 'y': (False, False), 'z': True}
If multiple inputs are given, the structure of the tree is taken from the first input;
subsequent inputs need only have ``tree`` as a prefix:
>>> tree_map(lambda x, y: [x] + y, [5, 6], [[7, 9], [1, 2]])
[[5, 7, 9], [6, 1, 2]]
Args:
func (callable): A function that takes ``1 + len(rests)`` arguments, to be applied at the
corresponding leaves of the pytrees.
tree (pytree): A pytree to be mapped over, with each leaf providing the first positional
argument to function ``func``.
rests (tuple of pytree): A tuple of pytrees, each of which has the same structure as
``tree`` or has ``tree`` as a prefix.
is_leaf (callable, optional): An extra leaf predicate function that will be called at each
flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
            as a leaf. Otherwise, the default pytree registry will be used to determine whether a
            node is a leaf. If the function is not specified, the default pytree registry will be used.
Returns:
A new pytree with the same structure as ``tree`` but with the value at each leaf given by
``func(x, *xs)`` where ``x`` is the value at the corresponding leaf in ``tree`` and ``xs``
is the tuple of values at corresponding nodes in ``rests``.
"""
return optree.tree_map(
func,
tree,
*rests,
is_leaf=is_leaf,
none_is_leaf=True,
namespace="torch",
)
def tree_map_(
func: Callable[..., Any],
tree: PyTree,
*rests: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
"""Like :func:`tree_map`, but do an inplace call on each leaf and return the original tree.
See also :func:`tree_map`.
Args:
func (callable): A function that takes ``1 + len(rests)`` arguments, to be applied at the
corresponding leaves of the pytrees.
tree (pytree): A pytree to be mapped over, with each leaf providing the first positional
argument to function ``func``.
rests (tuple of pytree): A tuple of pytrees, each of which has the same structure as
``tree`` or has ``tree`` as a prefix.
is_leaf (callable, optional): An extra leaf predicate function that will be called at each
flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
            as a leaf. Otherwise, the default pytree registry will be used to determine whether a
            node is a leaf. If the function is not specified, the default pytree registry will be used.
Returns:
        The original ``tree`` with the value at each leaf given by the side effect of function
        ``func(x, *xs)`` (not its return value), where ``x`` is the value at the corresponding leaf
        in ``tree`` and ``xs`` is the tuple of values at corresponding nodes in ``rests``.
"""
return optree.tree_map_(
func,
tree,
*rests,
is_leaf=is_leaf,
none_is_leaf=True,
namespace="torch",
)
Type2 = tuple[type[T], type[S]]
Type3 = tuple[type[T], type[S], type[U]]
if sys.version_info >= (3, 10):
TypeAny = Union[type[Any], tuple[type[Any], ...], types.UnionType]
else:
TypeAny = Union[type[Any], tuple[type[Any], ...]]
Fn2 = Callable[[Union[T, S]], R]
Fn3 = Callable[[Union[T, S, U]], R]
Fn = Callable[[T], R]
FnAny = Callable[[Any], R]
MapOnlyFn = Callable[[T], Callable[[Any], Any]]
# These specializations help with type inference on the lambda passed to this
# function
@overload
def map_only(type_or_types_or_pred: type[T], /) -> MapOnlyFn[Fn[T, Any]]:
...
@overload
def map_only(type_or_types_or_pred: Type2[T, S], /) -> MapOnlyFn[Fn2[T, S, Any]]:
...
@overload
def map_only(type_or_types_or_pred: Type3[T, S, U], /) -> MapOnlyFn[Fn3[T, S, U, Any]]:
...
# This specialization is needed for the implementations below that call
@overload
def map_only(type_or_types_or_pred: TypeAny, /) -> MapOnlyFn[FnAny[Any]]:
...
@overload
def map_only(type_or_types_or_pred: Callable[[Any], bool], /) -> MapOnlyFn[FnAny[Any]]:
...
def map_only(
type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]], /
) -> MapOnlyFn[FnAny[Any]]:
"""
Suppose you are writing a tree_map over tensors, leaving everything
else unchanged. Ordinarily you would have to write:
def go(t):
if isinstance(t, Tensor):
return ...
else:
return t
With this function, you only need to write:
@map_only(Tensor)
def go(t):
return ...
You can also directly use 'tree_map_only'
"""
if isinstance(type_or_types_or_pred, (type, tuple)) or (
sys.version_info >= (3, 10)
and isinstance(type_or_types_or_pred, types.UnionType)
):
def pred(x: Any) -> bool:
return isinstance(x, type_or_types_or_pred) # type: ignore[arg-type]
elif callable(type_or_types_or_pred):
pred = type_or_types_or_pred # type: ignore[assignment]
else:
raise TypeError("Argument must be a type, a tuple of types, or a callable.")
def wrapper(func: Callable[[T], Any]) -> Callable[[Any], Any]:
@functools.wraps(func)
def wrapped(x: T) -> Any:
if pred(x):
return func(x)
return x
return wrapped
return wrapper
@overload
def tree_map_only(
type_or_types_or_pred: type[T],
/,
func: Fn[T, Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
@overload
def tree_map_only(
type_or_types_or_pred: Type2[T, S],
/,
func: Fn2[T, S, Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
@overload
def tree_map_only(
type_or_types_or_pred: Type3[T, S, U],
/,
func: Fn3[T, S, U, Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
@overload
def tree_map_only(
type_or_types_or_pred: TypeAny,
/,
func: FnAny[Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
@overload
def tree_map_only(
type_or_types_or_pred: Callable[[Any], bool],
/,
func: FnAny[Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
def tree_map_only(
type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]],
/,
func: FnAny[Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
return tree_map(map_only(type_or_types_or_pred)(func), tree, is_leaf=is_leaf)
@overload
def tree_map_only_(
type_or_types_or_pred: type[T],
/,
func: Fn[T, Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
@overload
def tree_map_only_(
type_or_types_or_pred: Type2[T, S],
/,
func: Fn2[T, S, Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
@overload
def tree_map_only_(
type_or_types_or_pred: Type3[T, S, U],
/,
func: Fn3[T, S, U, Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
@overload
def tree_map_only_(
type_or_types_or_pred: TypeAny,
/,
func: FnAny[Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
@overload
def tree_map_only_(
type_or_types_or_pred: Callable[[Any], bool],
/,
func: FnAny[Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
def tree_map_only_(
type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]],
/,
func: FnAny[Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
return tree_map_(map_only(type_or_types_or_pred)(func), tree, is_leaf=is_leaf)
def tree_all(
pred: Callable[[Any], bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
flat_args = tree_iter(tree, is_leaf=is_leaf)
return all(map(pred, flat_args))
def tree_any(
pred: Callable[[Any], bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
flat_args = tree_iter(tree, is_leaf=is_leaf)
return any(map(pred, flat_args))
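# A quick illustration of the two predicates above (plain Python values, no
# extra registration needed):
#
#     tree_all(lambda x: isinstance(x, int), {"a": 1, "b": (2, 3)})   # -> True
#     tree_any(lambda x: x is None, [1, {"k": None}])                 # -> True
#       (None counts as a leaf here because none_is_leaf=True)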
@overload
def tree_all_only(
type_or_types: type[T],
/,
pred: Fn[T, bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
...
@overload
def tree_all_only(
type_or_types: Type2[T, S],
/,
pred: Fn2[T, S, bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
...
@overload
def tree_all_only(
type_or_types: Type3[T, S, U],
/,
pred: Fn3[T, S, U, bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
...
def tree_all_only(
type_or_types: TypeAny,
/,
pred: FnAny[bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
flat_args = tree_iter(tree, is_leaf=is_leaf)
return all(pred(x) for x in flat_args if isinstance(x, type_or_types))
@overload
def tree_any_only(
type_or_types: type[T],
/,
pred: Fn[T, bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
...
@overload
def tree_any_only(
type_or_types: Type2[T, S],
/,
pred: Fn2[T, S, bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
...
@overload
def tree_any_only(
type_or_types: Type3[T, S, U],
/,
pred: Fn3[T, S, U, bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
...
def tree_any_only(
type_or_types: TypeAny,
/,
pred: FnAny[bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
flat_args = tree_iter(tree, is_leaf=is_leaf)
return any(pred(x) for x in flat_args if isinstance(x, type_or_types))
def broadcast_prefix(
prefix_tree: PyTree,
full_tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> list[Any]:
"""Return a list of broadcasted leaves in ``prefix_tree`` to match the number of leaves in ``full_tree``.
If a ``prefix_tree`` is a prefix of a ``full_tree``, this means the ``full_tree`` can be
constructed by replacing the leaves of ``prefix_tree`` with appropriate **subtrees**.
This function returns a list of leaves with the same size as ``full_tree``. The leaves are
replicated from ``prefix_tree``. The number of replicas is determined by the corresponding
subtree in ``full_tree``.
>>> broadcast_prefix(1, [1, 2, 3])
[1, 1, 1]
>>> broadcast_prefix([1, 2, 3], [1, 2, 3])
[1, 2, 3]
>>> broadcast_prefix([1, 2, 3], [1, 2, 3, 4])
Traceback (most recent call last):
...
ValueError: list arity mismatch; expected: 3, got: 4; list: [1, 2, 3, 4].
>>> broadcast_prefix([1, 2, 3], [1, 2, (3, 4)])
[1, 2, 3, 3]
>>> broadcast_prefix([1, 2, 3], [1, 2, {"a": 3, "b": 4, "c": (None, 5)}])
[1, 2, 3, 3, 3, 3]
Args:
prefix_tree (pytree): A pytree with the same structure as a prefix of ``full_tree``.
full_tree (pytree): A pytree with the same structure as a suffix of ``prefix_tree``.
is_leaf (callable, optional): An extra leaf predicate function that will be called at each
flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
            as a leaf. Otherwise, the default pytree registry will be used to determine whether a
            node is a leaf. If the function is not specified, the default pytree registry will be used.
Returns:
A list of leaves in ``prefix_tree`` broadcasted to match the number of leaves in ``full_tree``.
"""
result: list[Any] = []
def add_leaves(x: Any, subtree: PyTree) -> None:
subtreespec = tree_structure(subtree, is_leaf=is_leaf)
result.extend([x] * subtreespec.num_leaves)
tree_map_(
add_leaves,
prefix_tree,
full_tree,
is_leaf=is_leaf,
)
return result
# Broadcasts a pytree to the provided TreeSpec and returns the flattened
# values. If this is not possible, then this function returns None.
#
# For example, given pytree=0 and spec=TreeSpec(list, None, [LeafSpec(), LeafSpec()]),
# this function would return [0, 0]. This is useful for part of the vmap implementation:
# a user can pass in vmap(fn, in_dims)(*inputs). `in_dims` should be
# broadcastable to the tree structure of `inputs` and we use
# _broadcast_to_and_flatten to check this.
def _broadcast_to_and_flatten(
tree: PyTree,
treespec: TreeSpec,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> Optional[list[Any]]:
assert _is_pytreespec_instance(treespec)
full_tree = tree_unflatten([0] * treespec.num_leaves, treespec)
try:
return broadcast_prefix(tree, full_tree, is_leaf=is_leaf)
except ValueError:
return None
def treespec_dumps(treespec: TreeSpec, protocol: Optional[int] = None) -> str:
"""Serialize a treespec to a JSON string."""
if not _is_pytreespec_instance(treespec):
raise TypeError(
f"treespec_dumps(treespec): Expected `treespec` to be instance of "
f"PyTreeSpec but got item of type {type(treespec)}."
)
dummy_tree = tree_unflatten([0] * treespec.num_leaves, treespec)
orig_treespec = python_pytree.tree_structure(dummy_tree)
return python_pytree.treespec_dumps(orig_treespec, protocol=protocol)
@functools.lru_cache
def treespec_loads(serialized: str) -> TreeSpec:
"""Deserialize a treespec from a JSON string."""
orig_treespec = python_pytree.treespec_loads(serialized)
dummy_tree = python_pytree.tree_unflatten(
[0] * orig_treespec.num_leaves,
orig_treespec,
)
treespec = tree_structure(dummy_tree)
return treespec
class _DummyLeaf:
def __repr__(self) -> str:
return "*"
def treespec_pprint(treespec: TreeSpec) -> str:
dummy_tree = tree_unflatten(
[_DummyLeaf() for _ in range(treespec.num_leaves)],
treespec,
)
return repr(dummy_tree)
class LeafSpecMeta(type(TreeSpec)): # type: ignore[misc]
def __instancecheck__(self, instance: object) -> bool:
return _is_pytreespec_instance(instance) and instance.is_leaf()
class LeafSpec(TreeSpec, metaclass=LeafSpecMeta):
def __new__(cls) -> "LeafSpec":
return optree.treespec_leaf(none_is_leaf=True) # type: ignore[return-value]
def tree_flatten_with_path(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> tuple[list[tuple[KeyPath, Any]], TreeSpec]:
"""Flattens a pytree like :func:`tree_flatten`, but also returns each leaf's key path.
Args:
tree: a pytree to flatten. If it contains a custom type, that type must be
registered with an appropriate `tree_flatten_with_path_fn` when registered
with :func:`register_pytree_node`.
is_leaf: An extra leaf predicate function that will be called at each
flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
            as a leaf. Otherwise, the default pytree registry will be used to determine whether a
            node is a leaf. If the function is not specified, the default pytree registry will be used.
Returns:
A tuple where the first element is a list of (key path, leaf) pairs, and the
second element is a :class:`TreeSpec` representing the structure of the flattened
tree.
"""
raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.")
def tree_leaves_with_path(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> list[tuple[KeyPath, Any]]:
"""Gets the leaves of a pytree like ``tree_leaves`` and returns each leaf's key path.
Args:
tree: a pytree. If it contains a custom type, that type must be
registered with an appropriate `tree_flatten_with_path_fn` when registered
with :func:`register_pytree_node`.
is_leaf: An extra leaf predicate function that will be called at each
flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
            as a leaf. Otherwise, the default pytree registry will be used to determine whether a
            node is a leaf. If the function is not specified, the default pytree registry will be used.
Returns:
A list of (key path, leaf) pairs.
"""
raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.")
def tree_map_with_path(
func: Callable[..., Any],
tree: PyTree,
*rests: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
"""Like :func:`tree_map`, but the provided callable takes an additional key path argument.
Args:
func: A function that takes ``2 + len(rests)`` arguments, to be applied at the
corresponding leaves of the pytrees. The first positional argument
to ``func`` is the key path of the leaf in question. The second
positional argument is the value of the leaf.
tree: A pytree to be mapped over, with each leaf providing the first positional
argument to function ``func``.
rests: A tuple of pytrees, each of which has the same structure as
``tree`` or has ``tree`` as a prefix.
is_leaf: An extra leaf predicate function that will be called at each
flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
            as a leaf. Otherwise, the default pytree registry will be used to determine whether a
            node is a leaf. If the function is not specified, the default pytree registry will be used.
    Returns:
A new pytree with the same structure as ``tree`` but with the value at each leaf given by
``func(keypath, x, *xs)`` where ``keypath`` is the key path at the
corresponding leaf in ``tree``, ``x`` is the value at that leaf, and
``xs`` is the tuple of values at corresponding nodes in ``rests``.
"""
raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.")
def keystr(kp: KeyPath) -> str:
"""Given a key path, return a pretty-printed representation."""
raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.")
def key_get(obj: Any, kp: KeyPath) -> Any:
"""Given an object and a key path, return the value at the key path."""
raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.")
with python_pytree._NODE_REGISTRY_LOCK:
python_pytree._cxx_pytree_imported = True
args, kwargs = (), {} # type: ignore[var-annotated]
for args, kwargs in python_pytree._cxx_pytree_pending_imports:
_private_register_pytree_node(*args, **kwargs)
python_pytree._cxx_pytree_pending_imports.clear()
del args, kwargs
```
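A minimal usage sketch of the helpers defined above. It goes through the pure-Python implementation in `torch.utils._pytree`, which these C++-backed helpers mirror, and assumes a recent PyTorch where `tree_all_only`, `tree_any_only`, `tree_map_only`, and the treespec (de)serialization helpers are all exported:
```py
import torch
import torch.utils._pytree as pytree

tree = {"w": torch.ones(2), "meta": {"step": 3, "lr": 0.1}}

# Predicate helpers only look at leaves of the requested type.
assert pytree.tree_all_only(torch.Tensor, lambda t: t.ndim == 1, tree)
assert pytree.tree_any_only(int, lambda x: x > 0, tree)

# Map over a single leaf type; other leaves pass through untouched.
doubled = pytree.tree_map_only(torch.Tensor, lambda t: t * 2, tree)
assert doubled["meta"]["step"] == 3

# Treespecs round-trip through their JSON form.
spec = pytree.tree_structure(tree)
assert pytree.treespec_loads(pytree.treespec_dumps(spec)) == spec
```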
|
==============================================================================================================
SOURCE CODE FILE: _device.py
LINES: 1
SIZE: 3.81 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_device.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from typing import Optional
import torch
from torch.overrides import TorchFunctionMode, _pop_mode, _push_mode
from torch.utils._contextlib import context_decorator
from torch._C import _len_torch_function_stack
import functools
CURRENT_DEVICE: Optional[torch.device] = None
@functools.lru_cache(1)
def _device_constructors():
return {
# standard ones
torch.empty,
torch.empty_permuted,
torch.empty_strided,
torch.empty_quantized,
torch.ones,
torch.arange,
torch.bartlett_window,
torch.blackman_window,
torch.eye,
torch.fft.fftfreq,
torch.fft.rfftfreq,
torch.full,
torch.fill,
torch.hamming_window,
torch.hann_window,
torch.kaiser_window,
torch.linspace,
torch.logspace,
torch.nested.nested_tensor,
# This function doesn't actually take a device argument
# torch.normal,
torch.ones,
torch.rand,
torch.randn,
torch.randint,
torch.randperm,
torch.range,
torch.sparse_coo_tensor,
torch.sparse_compressed_tensor,
torch.sparse_csr_tensor,
torch.sparse_csc_tensor,
torch.sparse_bsr_tensor,
torch.sparse_bsc_tensor,
torch.tril_indices,
torch.triu_indices,
torch.vander,
torch.zeros,
torch.asarray,
# weird ones
torch.tensor,
torch.as_tensor,
torch.scalar_tensor,
torch.asarray,
}
# NB: This is directly called from C++ in torch/csrc/Device.cpp
class DeviceContext(TorchFunctionMode):
def __init__(self, device):
self.device = torch.device(device)
def __enter__(self):
global CURRENT_DEVICE
self.old_device = CURRENT_DEVICE
CURRENT_DEVICE = self.device
        # We need to put the device at the bottom of the stack.
        # If we set the default device within a function mode context,
        # exiting that context mode would pop the device function mode off
        # of the stack incorrectly.
cur_stack = [_pop_mode() for _ in range(_len_torch_function_stack())]
_push_mode(self)
for mode in reversed(cur_stack):
_push_mode(mode)
def __exit__(self, exc_type, exc_val, exc_tb):
global CURRENT_DEVICE
CURRENT_DEVICE = self.old_device
cur_stack = []
# Invariant: there should only be one DeviceContext on the stack at any time
        # (At the bottom), pop all modes until we hit the bottom, assert it's a DeviceContext
# or else someone else has popped it!
for _ in range(_len_torch_function_stack() - 1):
mode = _pop_mode()
assert not isinstance(mode, DeviceContext)
cur_stack.append(mode)
if _len_torch_function_stack() > 0:
mode = _pop_mode()
assert isinstance(mode, DeviceContext)
for mode in reversed(cur_stack):
_push_mode(mode)
def __torch_function__(self, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
if func in _device_constructors() and kwargs.get('device') is None:
kwargs['device'] = self.device
return func(*args, **kwargs)
# NB: This is directly called from C++ in torch/csrc/Device.cpp
def device_decorator(device, func):
return context_decorator(lambda: device, func)
def set_device(device):
"""
Set the default device inside of the wrapped function by decorating it with this function.
If you would like to use this as a context manager, use device as a
context manager directly, e.g., ``with torch.device(device)``.
"""
return lambda func: device_decorator(torch.device(device), func)
```
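A short sketch of the behavior implemented by `DeviceContext`: inside a `with torch.device(...)` block, the factory functions listed in `_device_constructors()` get the default device injected unless a `device=` argument is passed explicitly. This assumes a PyTorch build where `torch.device` works as a context manager, as the `set_device` docstring suggests:
```py
import torch

with torch.device("meta"):
    t = torch.empty(4, 4)             # no device= given, so "meta" is injected
    u = torch.zeros(2, device="cpu")  # an explicit device still wins

assert t.device.type == "meta"
assert u.device.type == "cpu"
```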
|
==================================================================================================================
SOURCE CODE FILE: _exposed_in.py
LINES: 1
SIZE: 0.70 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_exposed_in.py
ENCODING: utf-8
```py
from typing import Callable, TypeVar
F = TypeVar("F")
# Allows one to expose an API in a private submodule publicly as per the definition
# in PyTorch's public api policy.
#
# It is a temporary solution while we figure out if it should be the long-term solution
# or if we should amend PyTorch's public api policy. The concern is that this approach
# may not be very robust because it's not clear what __module__ is used for.
# However, both numpy and jax overwrite the __module__ attribute of their APIs
# without problem, so it seems fine.
def exposed_in(module: str) -> Callable[[F], F]:
def wrapper(fn: F) -> F:
fn.__module__ = module
return fn
return wrapper
```
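A hedged example of how `exposed_in` is meant to be used; the module name below is hypothetical and only illustrates that the decorator rewrites `__module__` for public-API purposes:
```py
from torch.utils._exposed_in import exposed_in

@exposed_in("torch.example_public_api")  # hypothetical public module path
def helper(x: int) -> int:
    return x + 1

assert helper.__module__ == "torch.example_public_api"
assert helper(1) == 2
```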
|
================================================================================================================
SOURCE CODE FILE: _filelock.py
LINES: 1
SIZE: 1.57 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_filelock.py
ENCODING: utf-8
```py
from types import TracebackType
from typing import Optional
from typing_extensions import Self
from filelock import FileLock as base_FileLock
from torch.monitor import _WaitCounter
class FileLock(base_FileLock):
"""
This behaves like a normal file lock.
However, it adds waitcounters for acquiring and releasing the filelock
as well as for the critical region within it.
pytorch.filelock.enter - While we're acquiring the filelock.
pytorch.filelock.region - While we're holding the filelock and doing work.
pytorch.filelock.exit - While we're releasing the filelock.
"""
def __enter__(self) -> Self:
self.region_counter = _WaitCounter("pytorch.filelock.region").guard()
with _WaitCounter("pytorch.filelock.enter").guard():
result = super().__enter__()
self.region_counter.__enter__()
return result
def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
self.region_counter.__exit__()
with _WaitCounter("pytorch.filelock.exit").guard():
# Returns nothing per
# https://github.com/tox-dev/filelock/blob/57f488ff8fdc2193572efe102408fb63cfefe4e4/src/filelock/_api.py#L379
super().__exit__(exc_type, exc_value, traceback)
# Returns nothing per
# https://github.com/pytorch/pytorch/blob/0f6bfc58a2cfb7a5c052bea618ab62becaf5c912/torch/csrc/monitor/python_init.cpp#L315
return None
```
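A minimal sketch of using the instrumented `FileLock`; the lock file path is hypothetical, and the wait counters described in the docstring are emitted through `torch.monitor` as a side effect of entering and exiting the context:
```py
import os
import tempfile

from torch.utils._filelock import FileLock

lock_path = os.path.join(tempfile.gettempdir(), "example.lock")  # hypothetical path
with FileLock(lock_path):
    # Critical region: pytorch.filelock.region is counted while this block runs.
    pass
```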
|
=====================================================================================================================
SOURCE CODE FILE: _foreach_utils.py
LINES: 1
SIZE: 2.32 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_foreach_utils.py
ENCODING: utf-8
```py
from typing import Optional
import torch
from torch import Tensor
from torch.autograd.grad_mode import no_grad
from typing_extensions import TypeAlias
def _get_foreach_kernels_supported_devices() -> list[str]:
r"""Return the device type list that supports foreach kernels."""
return ["cuda", "xpu", torch._C._get_privateuse1_backend_name()]
def _get_fused_kernels_supported_devices() -> list[str]:
r"""Return the device type list that supports fused kernels in optimizer."""
return ["mps", "cuda", "xpu", "hpu", "cpu", torch._C._get_privateuse1_backend_name()]
TensorListList: TypeAlias = list[list[Optional[Tensor]]]
Indices: TypeAlias = list[int]
_foreach_supported_types = [torch.Tensor]
# This util function splits tensors into groups by device and dtype, which is useful before sending
# tensors off to a foreach implementation, which requires tensors to be on one device and dtype.
# If tensorlistlist contains more than one tensorlist, the following assumptions are made BUT NOT verified:
# - tensorlists CAN be None
# - all tensors in the first specified list cannot be None
# - given an index i, all specified tensorlist[i]s match in dtype and device
# with_indices (bool, optional): whether to track previous indices as the last list per dictionary entry.
# It comes in handy if there are Nones or literals in the tensorlists that are getting scattered out.
# Whereas mutating a tensor in the resulting split-up tensorlists WILL propagate changes back to the
# original input tensorlists, changing up Nones/literals WILL NOT propagate, and manual propagation
# may be necessary. Check out torch/optim/sgd.py for an example.
@no_grad()
def _group_tensors_by_device_and_dtype(
tensorlistlist: TensorListList,
with_indices: bool = False,
) -> dict[tuple[torch.device, torch.dtype], tuple[TensorListList, Indices]]:
return torch._C._group_tensors_by_device_and_dtype(tensorlistlist, with_indices)
def _device_has_foreach_support(device: torch.device) -> bool:
return device.type in (_get_foreach_kernels_supported_devices() + ["cpu"]) and not torch.jit.is_scripting()
def _has_foreach_support(tensors: list[Tensor], device: torch.device) -> bool:
return _device_has_foreach_support(device) and all(t is None or type(t) in _foreach_supported_types for t in tensors)
```
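A small sketch of how `_group_tensors_by_device_and_dtype` buckets nested tensor lists; the tensors below are illustrative, and the `[params, grads]` nesting is preserved inside each (device, dtype) bucket:
```py
import torch
from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype

params = [torch.zeros(2), torch.zeros(3, dtype=torch.float64)]
grads = [torch.ones(2), torch.ones(3, dtype=torch.float64)]

grouped = _group_tensors_by_device_and_dtype([params, grads], with_indices=True)
for (device, dtype), (tensorlists, indices) in grouped.items():
    # tensorlists[0] holds the params and tensorlists[1] the grads for this bucket;
    # indices records the original positions of the grouped tensors.
    print(device, dtype, indices, [t.shape for t in tensorlists[0]])
```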
|
==============================================================================================================
SOURCE CODE FILE: _freeze.py
LINES: 6
SIZE: 10.04 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_freeze.py
ENCODING: utf-8
```py
# mypy: allow-untyped-decorators
# mypy: allow-untyped-defs
"""
Freeze Python packages.
Freezing makes it possible to ship arbitrary Python modules as part of a C++
library. The Python source of the module is compiled to bytecode and written
to `.c` files, to be imported by Python's built-in FrozenImporter.
In a normal Python installation, FrozenImporter is only used to bootstrap the
initialization of the import machinery. Python's importers are defined in
Python (see `_bootstrap.py` and `_bootstrap_external.py`) but need to be
retrieved before any importers are available. Freezing the module bytecode
resolves this circular dependency.
This script will freeze the Python standard library. It produces two things:
- Bytecode files: A set of `.c` files that define C variables containing Python bytecode.
- Main file: A `main.c` file listing all of these modules in the right form to be
consumed by FrozenImporter.
The library that wishes to use these modules makes them available to the local
Python instance by extending `PyImport_FrozenModules` appropriately (see
https://docs.python.org/3/c-api/import.html#c.PyImport_FrozenModules).
"""
import argparse
import functools
import itertools
import marshal
import os
import types
from dataclasses import dataclass
from pathlib import Path
PATH_MARKER = "<Generated by torch::deploy>"
MAIN_INCLUDES = """#include <Python.h>
"""
MAIN_PREFIX_TEMPLATE = """
// Compiled standard library modules. These should be appended to the existing
// `PyImport_FrozenModules` that ships with CPython.
struct _frozen {}[] = {{
"""
FAKE_PREFIX = MAIN_PREFIX_TEMPLATE.format("_PyImport_FrozenModules")
MAIN_SUFFIX = """\
{0, 0, 0} /* sentinel */
};
"""
# Exclude some standard library modules to:
# 1. Slim down the final frozen lib.
# 2. Remove functionality we don't want to support.
DENY_LIST = [
# Interface to unix databases
"dbm",
# ncurses bindings (terminal interfaces)
"curses",
# Tcl/Tk GUI
"tkinter",
"tkinter",
# Tests for the standard library
"test",
"tests",
"idle_test",
"__phello__.foo.py",
# importlib frozen modules. These are already baked into CPython.
"_bootstrap.py",
"_bootstrap_external.py",
]
NUM_BYTECODE_FILES = 5
def indent_msg(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
args[0].indent += 1
ret = fn(*args, **kwargs)
args[0].indent -= 1
return ret
return wrapper
@dataclass
class FrozenModule:
# The fully qualified module name, e.g. 'foo.bar.baz'
module_name: str
# The name of the C variable that holds the bytecode, e.g. 'M_foo__bar__baz'
c_name: str
# The size of the C variable. Negative if this module is a package.
size: int
# The frozen bytecode
bytecode: bytes
class Freezer:
def __init__(self, verbose: bool):
self.frozen_modules: list[FrozenModule] = []
self.indent: int = 0
self.verbose: bool = verbose
def msg(self, path: Path, code: str):
if not self.verbose:
return
# P: package dir
# F: python file
# S: skipped (not a package dir)
# X: skipped (deny-listed)
# N: skipped (not a python file)
print(" " * self.indent, end="")
print(f"{code} {path}")
def write_bytecode(self, install_root):
"""
Write the `.c` files containing the frozen bytecode.
        Shard the frozen modules evenly across the files.
"""
bytecode_file_names = [f"bytecode_{i}.c" for i in range(NUM_BYTECODE_FILES)]
bytecode_files = [
open(os.path.join(install_root, name), "w") for name in bytecode_file_names
]
it = itertools.cycle(bytecode_files)
for m in self.frozen_modules:
self.write_frozen(m, next(it))
for f in bytecode_files:
f.close()
def write_main(self, install_root, oss, symbol_name):
"""Write the `main.c` file containing a table enumerating all the frozen modules."""
with open(os.path.join(install_root, "main.c"), "w") as outfp:
outfp.write(MAIN_INCLUDES)
for m in self.frozen_modules:
outfp.write(f"extern unsigned char {m.c_name}[];\n")
outfp.write(MAIN_PREFIX_TEMPLATE.format(symbol_name))
for m in self.frozen_modules:
outfp.write(f'\t{{"{m.module_name}", {m.c_name}, {m.size}}},\n')
outfp.write(MAIN_SUFFIX)
if oss:
outfp.write(FAKE_PREFIX)
outfp.write(MAIN_SUFFIX)
def write_frozen(self, m: FrozenModule, outfp):
"""Write a single frozen module's bytecode out to a C variable."""
outfp.write(f"unsigned char {m.c_name}[] = {{")
for i in range(0, len(m.bytecode), 16):
outfp.write("\n\t")
for c in bytes(m.bytecode[i : i + 16]):
outfp.write(f"{c:d},")
outfp.write("\n};\n")
def compile_path(self, path: Path, top_package_path: Path):
"""Entry point for compiling a Path object."""
if path.is_dir():
self.compile_package(path, top_package_path)
else:
self.compile_file(path, top_package_path)
@indent_msg
def compile_package(self, path: Path, top_package_path: Path):
"""Compile all the files within a Python package dir."""
assert path.is_dir()
if path.name in DENY_LIST:
self.msg(path, "X")
return
# Python packages are directories that have __init__.py in them.
is_package_dir = any(child.name == "__init__.py" for child in path.iterdir())
if not is_package_dir:
self.msg(path, "S")
return
self.msg(path, "P")
# Recursively compile all children in this dir
for child in path.iterdir():
self.compile_path(child, top_package_path)
def get_module_qualname(self, file_path: Path, top_package_path: Path) -> list[str]:
# `path` looks like 'Lib/foo/bar/baz.py'
# chop off 'Lib/' to get something that represents a Python module hierarchy.
# e.g. 'foo/bar/baz.py', which maps to 'foo.bar.baz'
normalized_path = file_path.relative_to(top_package_path.parent)
if normalized_path.name == "__init__.py":
# Special handling for `__init__.py`. In this case, this file
# specifies that the containing directory should be treated as a package.
# For 'foo/bar/baz/__init__.py':
# - The module name is 'baz'
module_basename = normalized_path.parent.name
# - The parent is foo.bar (need to shave off the 'baz')
module_parent = normalized_path.parent.parent.parts
else:
module_basename = normalized_path.stem
module_parent = normalized_path.parent.parts
return list(module_parent) + [module_basename]
def compile_string(self, file_content: str) -> types.CodeType:
# instead of passing in the real build time path to 'compile', we
# pass in a marker instead. This prevents the build time path being
# leaked to runtime. That path may not be available at runtime.
        # Setting the path to a marker makes sure it's a hard error rather
        # than a flaky error when the inspect module tries to retrieve python source
# code during torchscripting.
path_marker = PATH_MARKER
return compile(file_content, path_marker, "exec")
@indent_msg
def compile_file(self, path: Path, top_package_path: Path):
"""
Compile a Python source file to frozen bytecode.
Append the result to `self.frozen_modules`.
"""
assert path.is_file()
if path.suffix != ".py":
self.msg(path, "N")
return
if path.name in DENY_LIST:
self.msg(path, "X")
return
self.msg(path, "F")
module_qualname = self.get_module_qualname(path, top_package_path)
module_mangled_name = "__".join(module_qualname)
c_name = "M_" + module_mangled_name
with open(path) as src_file:
co = self.compile_string(src_file.read())
bytecode = marshal.dumps(co)
size = len(bytecode)
if path.name == "__init__.py":
# Python packages are signified by negative size.
size = -size
self.frozen_modules.append(
FrozenModule(".".join(module_qualname), c_name, size, bytecode)
)
def main() -> None:
parser = argparse.ArgumentParser(description="Compile py source")
parser.add_argument("paths", nargs="*", help="Paths to freeze.")
parser.add_argument("--verbose", action="store_true", help="Print debug logs")
parser.add_argument(
"--install-dir", "--install_dir", help="Root directory for all output files"
)
parser.add_argument(
"--oss",
action="store_true",
help="If it's OSS build, add a fake _PyImport_FrozenModules",
)
parser.add_argument(
"--symbol-name",
"--symbol_name",
help="The name of the frozen module array symbol to generate",
default="_PyImport_FrozenModules_torch",
)
args = parser.parse_args()
f = Freezer(args.verbose)
for p in args.paths:
path = Path(p)
if path.is_dir() and not Path.exists(path / "__init__.py"):
# this 'top level path p' is a standard directory containing modules,
# not a module itself
# each 'mod' could be a dir containing __init__.py or .py file
# NB: sorted to make sure this is deterministic
for mod in sorted(path.glob("*")):
f.compile_path(mod, mod)
else:
f.compile_path(path, path)
f.write_bytecode(args.install_dir)
f.write_main(args.install_dir, args.oss, args.symbol_name)
if __name__ == "__main__":
main() # pragma: no cover
```
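A runnable sketch of driving `Freezer` programmatically instead of via the CLI `main()`; the throwaway package and output directory below are created on the fly and are purely illustrative:
```py
import tempfile
from pathlib import Path

from torch.utils._freeze import Freezer

with tempfile.TemporaryDirectory() as tmp:
    # Build a tiny throwaway package with one module.
    pkg = Path(tmp) / "demo_pkg"
    pkg.mkdir()
    (pkg / "__init__.py").write_text("")
    (pkg / "hello.py").write_text("GREETING = 'hi'\n")

    out = Path(tmp) / "out"
    out.mkdir()

    f = Freezer(verbose=True)
    f.compile_path(pkg, pkg)    # walks the package and freezes each .py file
    f.write_bytecode(str(out))  # emits bytecode_0.c .. bytecode_4.c
    f.write_main(str(out), True, "_PyImport_FrozenModules_torch")  # main.c with the frozen table
```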
|
=================================================================================================================
SOURCE CODE FILE: _functools.py
LINES: 1
SIZE: 1.43 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_functools.py
ENCODING: utf-8
```py
import functools
from typing import Callable, TypeVar
from typing_extensions import Concatenate, ParamSpec
_P = ParamSpec("_P")
_T = TypeVar("_T")
_C = TypeVar("_C")
# Sentinel used to indicate that cache lookup failed.
_cache_sentinel = object()
def cache_method(
f: Callable[Concatenate[_C, _P], _T]
) -> Callable[Concatenate[_C, _P], _T]:
"""
Like `@functools.cache` but for methods.
`@functools.cache` (and similarly `@functools.lru_cache`) shouldn't be used
on methods because it caches `self`, keeping it alive
forever. `@cache_method` ignores `self` so won't keep `self` alive (assuming
no cycles with `self` in the parameters).
Footgun warning: This decorator completely ignores self's properties so only
use it when you know that self is frozen or won't change in a meaningful
way (such as the wrapped function being pure).
"""
cache_name = "_cache_method_" + f.__name__
@functools.wraps(f)
def wrap(self: _C, *args: _P.args, **kwargs: _P.kwargs) -> _T:
assert not kwargs
if not (cache := getattr(self, cache_name, None)):
cache = {}
setattr(self, cache_name, cache)
cached_value = cache.get(args, _cache_sentinel)
if cached_value is not _cache_sentinel:
return cached_value
value = f(self, *args, **kwargs)
cache[args] = value
return value
return wrap
```
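A quick illustration of the per-instance caching behavior of `cache_method`; the class below is made up for the example, and note that the decorator only supports positional arguments (it asserts that no kwargs are passed):
```py
from torch.utils._functools import cache_method

class Squarer:
    def __init__(self) -> None:
        self.calls = 0

    @cache_method
    def square(self, x: int) -> int:
        self.calls += 1
        return x * x

s = Squarer()
assert s.square(3) == 9
assert s.square(3) == 9   # served from the per-instance cache
assert s.calls == 1
```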
|
========================================================================================================================
SOURCE CODE FILE: _get_clean_triton.py
LINES: 5
SIZE: 5.11 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_get_clean_triton.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import argparse
import os
import re
from pathlib import Path
def remove_triton_function_declaration(source_code: str) -> str:
remove_head = re.sub(r"(\n.+\s\'\'\'\n)", "\n", source_code)
remove_tail = re.sub(r"(\'\'\'\,.+)", "\n", remove_head)
return remove_tail
def remove_async_compile(source_code: str) -> str:
remove_top_level = str.replace(source_code, "async_compile = AsyncCompile()", "")
remove_compile = str.replace(remove_top_level, "async_compile.wait(globals())", "")
remove_del = str.replace(remove_compile, "del async_compile", "")
return remove_del
def rename_kernels(source_code: str) -> str:
pattern = r"(\w+)\s*=\s*async_compile\.triton\('triton_',\s"
triton_kernel_decl = "def triton_"
matches = [
(match.end(), match.group(1))
for match in re.finditer(pattern, source_code, re.DOTALL)
]
# Starting from the last match to avoid issues with shifting indices after replacements
for end_index, captured_string in reversed(matches):
        # Find the index of the next triton kernel declaration after the current match
index_of_B = source_code.find(triton_kernel_decl, end_index)
if index_of_B != -1:
# Replace the triton_kernel_decl with the captured string
source_code = (
source_code[:index_of_B]
+ f"def {captured_string}"
+ source_code[index_of_B + len(triton_kernel_decl) :]
)
else:
# If triton_kernel_decl is not found after the current match, continue to the next
continue
return source_code
def merge_params(original_params: list[str], new_params: list[str]) -> list[str]:
for idx in range(len(new_params)):
if new_params[idx] == "T":
new_params[idx] = original_params[idx]
return new_params
def add_launch_params(
original: str, kernel_to_params: dict[str, tuple[str, str]]
) -> str:
# Regex to match the function call in the original string
pattern = r"(\w+)\.run\((.*)\)"
def replace(match) -> str:
# Extract parts from the regex match
func_name = match.group(1)
params = match.group(2)
new_params, grid = kernel_to_params[func_name]
new_params = merge_params(params.split(", "), new_params.split(", "))
# Format the new function call
new_string = f"{func_name}[{grid}]({', '.join(new_params)})"
return new_string
transformed = re.sub(pattern, replace, original)
remove_inductor_wrappers = re.sub(
r"@triton_heuristics[^@]*@triton.jit",
r"@triton.jit",
transformed,
flags=re.DOTALL,
)
return remove_inductor_wrappers
def process_file(input_filename: str, output_filename: str) -> str:
with open(input_filename) as file:
source_code = file.read()
transformed_code = source_code
if "def triton_(" in source_code:
raise RuntimeError(
"Need to run original Pytorch code generating kernels with TORCHINDUCTOR_UNIQUE_KERNEL_NAMES=1"
)
# transformed_code = rename_kernels(transformed_code)
transformed_code = remove_triton_function_declaration(transformed_code)
transformed_code = remove_async_compile(transformed_code)
launch_params_filename = f"{input_filename}.launch_params"
if not os.path.exists(launch_params_filename):
raise RuntimeError(
f"Missing {launch_params_filename}. Run `TORCHINDUCTOR_DUMP_LAUNCH_PARAMS=1 python {input_filename} first."
)
with open(launch_params_filename) as f:
launch_params_meta = f.readlines()
split_params = [i.split("|") for i in launch_params_meta]
kernel_args_grid = {a.strip(): (b.strip(), c.strip()) for a, b, c in split_params}
transformed_code = add_launch_params(transformed_code, kernel_args_grid)
with open(output_filename, "w") as file:
file.write(transformed_code)
return transformed_code
def get_clean_triton(
input_path: Path, output_path: Path = Path("triton_only_repro.py")
):
"""Run experiments and output results to file
Args:
input_path (Optional[Path]): Path to inductor generated output codede
output_path (Optional[Path]): Path to write out the new python file
"""
return process_file(str(input_path), str(output_path))
if __name__ == "__main__":
"""Sample usage:
    # Strip Inductor-specific wrappers from a generated output file
    python _get_clean_triton.py <inductor_output_code>.py --output_path triton_only_repro.py
"""
parser = argparse.ArgumentParser(
description="Clean Inductor generated code to remove Inductor dependencies"
)
# Add the arguments
parser.add_argument(
"input_path", type=Path, help="Path to inductor generated output code"
)
parser.add_argument(
"--output_path",
type=Path,
default=Path("triton_only_repro.py"),
help="Path to write out the clean triton output",
)
# Parse the arguments
args = parser.parse_args()
# Call the function with parsed arguments
result = get_clean_triton(args.input_path, args.output_path)
```
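A hypothetical end-to-end flow for this script; the file names are placeholders, and the two environment variables are the ones referenced by the error messages above:
```py
# 1. Generate Inductor output with unique kernel names and dumped launch params:
#      TORCHINDUCTOR_UNIQUE_KERNEL_NAMES=1 TORCHINDUCTOR_DUMP_LAUNCH_PARAMS=1 \
#          python <inductor_output_code>.py
#    This produces <inductor_output_code>.py.launch_params next to the file.
#
# 2. Strip the Inductor wrappers, either via the CLI shown above or programmatically:
from pathlib import Path

from torch.utils._get_clean_triton import get_clean_triton

# get_clean_triton(Path("<inductor_output_code>.py"), Path("triton_only_repro.py"))
```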
|
====================================================================================================================
SOURCE CODE FILE: _import_utils.py
LINES: 1
SIZE: 1.34 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_import_utils.py
ENCODING: utf-8
```py
import functools
import importlib.util
from types import ModuleType
from typing import Optional
import torch
def _check_module_exists(name: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
try:
spec = importlib.util.find_spec(name)
return spec is not None
except ImportError:
return False
@functools.lru_cache
def dill_available() -> bool:
return (
_check_module_exists("dill")
# dill fails to import under torchdeploy
and not torch._running_with_deploy()
)
@functools.lru_cache
def import_dill() -> Optional[ModuleType]:
if not dill_available():
return None
import dill
# XXX: By default, dill writes the Pickler dispatch table to inject its
# own logic there. This globally affects the behavior of the standard library
# pickler for any user who transitively depends on this module!
# Undo this extension to avoid altering the behavior of the pickler globally.
dill.extend(use_dill=False)
return dill
```
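A small sketch of the availability checks; the results of `dill_available()` depend on the local environment, so the branch below is only indicative:
```py
from torch.utils._import_utils import _check_module_exists, dill_available, import_dill

assert _check_module_exists("json")                  # stdlib module, found without importing it
assert not _check_module_exists("definitely_not_a_real_module")

if dill_available():
    dill = import_dill()   # imported with its global Pickler extension undone
else:
    print("dill is not installed in this environment")
```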
|
==================================================================================================================
SOURCE CODE FILE: _mode_utils.py
LINES: 1
SIZE: 0.26 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_mode_utils.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import torch
from typing import TypeVar
T = TypeVar('T')
# returns if all are the same mode
def all_same_mode(modes):
return all(tuple(mode == modes[0] for mode in modes))
no_dispatch = torch._C._DisableTorchDispatch
```
|
===================================================================================================================
SOURCE CODE FILE: _ordered_set.py
LINES: 1
SIZE: 5.55 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_ordered_set.py
ENCODING: utf-8
```py
from __future__ import annotations
from collections.abc import (
Iterable,
Iterator,
MutableSet,
Reversible,
Set as AbstractSet,
)
from typing import Any, cast, Optional, TypeVar
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
__all__ = ["OrderedSet"]
class OrderedSet(MutableSet[T], Reversible[T]):
"""
Insertion ordered set, similar to OrderedDict.
"""
__slots__ = ("_dict",)
def __init__(self, iterable: Optional[Iterable[T]] = None):
self._dict = dict.fromkeys(iterable, None) if iterable is not None else {}
@staticmethod
def _from_dict(dict_inp: dict[T, None]) -> OrderedSet[T]:
s: OrderedSet[T] = OrderedSet()
s._dict = dict_inp
return s
#
    # Required overridden abstract methods
#
def __contains__(self, elem: object) -> bool:
return elem in self._dict
def __iter__(self) -> Iterator[T]:
return iter(self._dict)
def __len__(self) -> int:
return len(self._dict)
def __reversed__(self) -> Iterator[T]:
return reversed(self._dict)
def add(self, elem: T) -> None:
self._dict[elem] = None
def discard(self, elem: T) -> None:
self._dict.pop(elem, None)
def clear(self) -> None:
# overridden because MutableSet impl is slow
self._dict.clear()
# Unimplemented set() methods in _collections_abc.MutableSet
@classmethod
def _wrap_iter_in_set(cls, other: Any) -> Any:
"""
Wrap non-Set Iterables in OrderedSets
Some of the magic methods are more strict on input types than
the public apis, so we need to wrap inputs in sets.
"""
if not isinstance(other, AbstractSet) and isinstance(other, Iterable):
return cls(other)
else:
return other
def pop(self) -> T:
if not self:
raise KeyError("pop from an empty set")
return self._dict.popitem()[0]
def copy(self) -> OrderedSet[T]:
return OrderedSet._from_dict(self._dict.copy())
def difference(self, *others: Iterable[T]) -> OrderedSet[T]:
res = self.copy()
res.difference_update(*others)
return res
def difference_update(self, *others: Iterable[T]) -> None:
for other in others:
self -= other # type: ignore[arg-type]
def update(self, *others: Iterable[T]) -> None:
for other in others:
self |= other
def intersection(self, *others: Iterable[T]) -> OrderedSet[T]:
res = self.copy()
for other in others:
if other is not self:
res &= other # type: ignore[arg-type]
return res
def intersection_update(self, *others: Iterable[T]) -> None:
for other in others:
self &= other # type: ignore[arg-type]
def issubset(self, other: Iterable[T]) -> bool:
return self <= self._wrap_iter_in_set(other)
def issuperset(self, other: Iterable[T]) -> bool:
return self >= self._wrap_iter_in_set(other)
def symmetric_difference(self, other: Iterable[T]) -> OrderedSet[T]:
return self ^ other # type: ignore[operator]
def symmetric_difference_update(self, other: Iterable[T]) -> None:
self ^= other # type: ignore[arg-type]
def union(self, *others: Iterable[T]) -> OrderedSet[T]:
res = self.copy()
for other in others:
if other is self:
continue
res |= other
return res
# Specify here for correct type inference, otherwise would
# return AbstractSet[T]
def __sub__(self, other: AbstractSet[T_co]) -> OrderedSet[T]:
# following cpython set impl optimization
if isinstance(other, OrderedSet) and (len(self) * 4) > len(other):
out = self.copy()
out -= other
return out
return cast(OrderedSet[T], super().__sub__(other))
def __ior__(self, other: Iterable[T]) -> OrderedSet[T]: # type: ignore[misc, override] # noqa: PYI034
if isinstance(other, OrderedSet):
self._dict.update(other._dict)
return self
return super().__ior__(other) # type: ignore[arg-type]
def __eq__(self, other: object) -> bool:
if isinstance(other, OrderedSet):
return self._dict == other._dict
return super().__eq__(other)
def __ne__(self, other: object) -> bool:
if isinstance(other, OrderedSet):
return self._dict != other._dict
return super().__ne__(other)
def __or__(self, other: AbstractSet[T_co]) -> OrderedSet[T]:
return cast(OrderedSet[T], super().__or__(other))
def __and__(self, other: AbstractSet[T_co]) -> OrderedSet[T]:
# MutableSet impl will iterate over other, iter over smaller of two sets
if isinstance(other, OrderedSet) and len(self) < len(other):
return other & self
return cast(OrderedSet[T], super().__and__(other))
def __xor__(self, other: AbstractSet[T_co]) -> OrderedSet[T]:
return cast(OrderedSet[T], super().__xor__(other))
def __repr__(self) -> str:
return f"{self.__class__.__name__}({list(self)})"
def __getstate__(self) -> list[T]:
return list(self._dict.keys())
def __setstate__(self, state: list[T]) -> None:
self._dict = dict.fromkeys(state, None)
def __reduce__(self) -> tuple[type[OrderedSet[T]], tuple[list[T]]]:
return (OrderedSet, (list(self),))
```
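A brief sketch of `OrderedSet` semantics: insertion order is preserved, duplicates are dropped, and the usual set algebra works against arbitrary iterables:
```py
from torch.utils._ordered_set import OrderedSet

s = OrderedSet(["b", "a", "b", "c"])
assert list(s) == ["b", "a", "c"]      # insertion order, duplicates dropped

s |= ["d", "a"]
assert list(s) == ["b", "a", "c", "d"]

assert s.issubset(["a", "b", "c", "d", "e"])
assert list(s.intersection(["c", "a"])) == ["a", "c"]   # result order follows self, not the argument
```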
|
=======================================================================================================================
SOURCE CODE FILE: _python_dispatch.py
LINES: 1
SIZE: 28.55 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_python_dispatch.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import contextlib
import warnings
from dataclasses import dataclass
from typing import Any, Optional, Union, Protocol, overload
from collections.abc import Sequence
from typing_extensions import TypeIs
from collections import deque
import torch
import torchgen
import torchgen.model
from torch._C import (
_get_dispatch_stack_at,
_len_torch_dispatch_stack,
_pop_torch_dispatch_stack,
_push_on_torch_dispatch_stack,
DispatchKey,
)
# TODO: Limitations and things about enable_torch_dispatch_mode we should fix before exposing it:
# - We need a better user-facing api for _DisableTorchDispatch that
# is able to selectively disable __torch_dispatch__ of a particular class.
# - It doesn't work with the tensor constructors (torch.tensor, torch.Tensor)
# - Better name (see https://github.com/pytorch/pytorch/pull/63496#discussion_r694091694)
_is_in_torch_dispatch_mode = False
_is_in_non_infra_torch_dispatch_mode = False
def is_in_torch_dispatch_mode(include_infra_modes=True) -> bool:
return _is_in_torch_dispatch_mode if include_infra_modes else _is_in_non_infra_torch_dispatch_mode
class TorchDispatchMode:
"""
A ``TorchDispatchMode`` allows you to override the meaning of all
``__torch_dispatch__`` overrideable functions within a dynamic scope,
without having to actually create a tensor subclass or manually
monkey-patch functions in the PyTorch API. Some common situations
where you should use a mode:
* You want to override the meaning of factory functions, or other
functions that do not otherwise take a tensor as an argument
(these cannot be overridden with tensor subclasses).
* You want to override the behavior of all functions without needing
to wrap your inputs in tensor subclasses; e.g., if you are just
interested in logging intermediate computations.
* You want to control the order of execution of various tensor
subclasses explicitly, rather than implicitly via the return of
``NotImplemented``.
Independent subclasses of :class:`TorchDispatchMode` are compositional:
modes can be pushed onto a stack using ``with MyMode():``.
When you call functions in the PyTorch API inside your
``__torch_dispatch__`` implementation, by default, they will forward on to
the next mode on the mode stack. If you want recursively call back into
your current ``__torch_dispatch__`` implementation, either explicitly
invoke ``self.__torch_dispatch__(...)``, or use the context manager
``__torch_dispatch__(self)`` to make PyTorch
API self-referential (beware of infinite loops, in this case!)
"""
def __init__(self, _dispatch_key=None):
if _dispatch_key is not None:
assert isinstance(_dispatch_key, torch._C.DispatchKey)
self.__dict__["_dispatch_key"] = _dispatch_key
self.old_dispatch_mode_flags: deque[bool] = deque()
self.old_non_infra_dispatch_mode_flags: deque[bool] = deque()
def _lazy_init_old_dispatch_mode_flags(self):
if not hasattr(self, "old_dispatch_mode_flags"):
self.old_dispatch_mode_flags: deque[bool] = deque() # type: ignore[no-redef]
if not hasattr(self, "old_non_infra_dispatch_mode_flags"):
self.old_non_infra_dispatch_mode_flags: deque[bool] = deque() # type: ignore[no-redef]
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
raise NotImplementedError
def __enter__(self):
global _is_in_torch_dispatch_mode
global _is_in_non_infra_torch_dispatch_mode
# Previously, there wasn't any state in this class' constructor
# super calls were added to existing modes, but for any new modes
# this will replicate the previous behavior of not strictly needing
# to call super().__init__()
self._lazy_init_old_dispatch_mode_flags()
self.old_dispatch_mode_flags.append(_is_in_torch_dispatch_mode)
_is_in_torch_dispatch_mode = True
self.old_non_infra_dispatch_mode_flags.append(_is_in_non_infra_torch_dispatch_mode)
_is_in_non_infra_torch_dispatch_mode = _is_in_non_infra_torch_dispatch_mode or not self.is_infra_mode()
_push_mode(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
mb_dk_or_mode_key = self.__dict__.get("_dispatch_key", None)
if mb_dk_or_mode_key is None:
# Today, mode keys are not used at all in the per-dispatch-key-mode logic (for pre-dispatch)
# We should probably revisit this.
mb_dk_or_mode_key = self.__dict__.get("_mode_key", None)
global _is_in_torch_dispatch_mode
_is_in_torch_dispatch_mode = self.old_dispatch_mode_flags.pop()
global _is_in_non_infra_torch_dispatch_mode
_is_in_non_infra_torch_dispatch_mode = self.old_non_infra_dispatch_mode_flags.pop()
_pop_mode(mb_dk_or_mode_key)
@classmethod
def push(cls, *args, **kwargs):
warnings.warn(
"`Mode.push()` is no longer necessary and can be replaced with just `with Mode()`"
)
instance = cls(*args, **kwargs)
return instance
@classmethod
def is_infra_mode(cls):
return False
def _get_current_dispatch_mode():
stack_len = _len_torch_dispatch_stack()
# Return a user mode on the stack if there are any
if stack_len > 0:
return _get_dispatch_stack_at(stack_len - 1)
return None
def _detect_infra_mode(key):
assert key in [torch._C._TorchDispatchModeKey.FUNCTIONAL, torch._C._TorchDispatchModeKey.PROXY]
from torch._ops import _get_dispatch_mode_pre_dispatch
pre_dispatch_mode = _get_dispatch_mode_pre_dispatch(
key
)
post_dispatch_mode = torch._C._get_dispatch_mode(
key
)
assert (pre_dispatch_mode is None) or (
post_dispatch_mode is None
)
if pre_dispatch_mode is None:
return post_dispatch_mode
return pre_dispatch_mode
def _unset_infra_mode(key):
from torch._ops import _get_dispatch_mode_pre_dispatch, unset_mode_pre_dispatch
pre_dispatch_mode = _get_dispatch_mode_pre_dispatch(key)
post_dispatch_mode = torch._C._get_dispatch_mode(key)
if pre_dispatch_mode and post_dispatch_mode:
raise AssertionError(
"Can't have active infra mode on both pre and post dispatch mode stack"
)
if pre_dispatch_mode:
mode = unset_mode_pre_dispatch(key)
return mode
if post_dispatch_mode:
return torch._C._unset_dispatch_mode(key)
def _disable_infra_mode(key):
assert key in (
torch._C._TorchDispatchModeKey.FUNCTIONAL,
torch._C._TorchDispatchModeKey.PROXY,
)
mode_unset = _unset_infra_mode(key)
try:
yield mode_unset
finally:
if mode_unset is not None:
_push_mode(mode_unset)
def _get_current_dispatch_mode_stack():
stack_len = _len_torch_dispatch_stack()
return [_get_dispatch_stack_at(i) for i in range(stack_len)]
def _push_mode(mode: TorchDispatchMode):
k = mode._dispatch_key if hasattr(mode, "_dispatch_key") else None
assert k is None or k == torch._C.DispatchKey.PreDispatch
if k is None:
_push_on_torch_dispatch_stack(mode)
return
from torch._ops import _set_mode_pre_dispatch, get_cached_ops
# See Note [Not Caching Per-Dispatch-Key Mode Handlers]
# Clear the cache of every op that has been used so far, for this particular key.
ks = torch._C._functionality_to_backend_keys(k)
for op in get_cached_ops():
for key in ks:
op._uncache_dispatch(key)
_set_mode_pre_dispatch(mode)
def _pop_mode(k: Optional[Union[DispatchKey, torch._C._TorchDispatchModeKey]] = None):
if k == torch._C.DispatchKey.PreDispatch: # type: ignore[attr-defined]
from torch._ops import _pop_mode_from_pre_dispatch
return _pop_mode_from_pre_dispatch()
if k is None or isinstance(k, torch._C._TorchDispatchModeKey):
return _pop_torch_dispatch_stack(k)
@contextlib.contextmanager
def _pop_mode_temporarily(k: Optional[DispatchKey] = None):
old = _pop_mode(k)
try:
yield old
finally:
_push_mode(old)
@contextlib.contextmanager
def _disable_current_modes():
from torch._ops import (
_len_torch_dispatch_stack_pre_dispatch,
_pop_mode_from_pre_dispatch,
)
from torch._subclasses.functional_tensor import FunctionalTensorMode
from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode
from torch._subclasses.schema_check_mode import SchemaCheckMode
mode_len_pre_dispatch = _len_torch_dispatch_stack_pre_dispatch()
old_pre_dispatch_modes = [
_pop_mode_from_pre_dispatch() for _ in range(mode_len_pre_dispatch)
]
has_proxy_mode_in_pre_dispatch = False
has_functional_mode_in_pre_dispatch = False
has_schema_check_mode_in_pre_dispatch = False
for i in old_pre_dispatch_modes:
if isinstance(i, ProxyTorchDispatchMode):
has_proxy_mode_in_pre_dispatch = True
if isinstance(i, FunctionalTensorMode):
has_functional_mode_in_pre_dispatch = True
if isinstance(i, SchemaCheckMode):
has_schema_check_mode_in_pre_dispatch = True
mode_len = _len_torch_dispatch_stack()
old_modes = [_pop_mode() for _ in range(mode_len)]
for old in old_modes:
if (
isinstance(old, FunctionalTensorMode)
and has_functional_mode_in_pre_dispatch
):
raise AssertionError(
"Can't have FunctionalMode available both in PreDispatch and Python Key"
)
if isinstance(old, ProxyTorchDispatchMode) and has_proxy_mode_in_pre_dispatch:
raise AssertionError(
"Can't have ProxyTorchDispatchMode available both in PreDispatch and Python Key"
)
if (
isinstance(old, SchemaCheckMode)
and has_schema_check_mode_in_pre_dispatch
):
raise AssertionError(
"Can't have SchemaCheckMode available both in PreDispatch and Python Key"
)
# Manually disable proxy and fake modes, if any are active
try:
yield old_pre_dispatch_modes + old_modes
finally:
for mode in reversed(old_modes):
_push_mode(mode)
for mode in reversed(old_pre_dispatch_modes):
_push_mode(mode)
class BaseTorchDispatchMode(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
return func(*args, **kwargs)
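# Example (not part of the original module): a minimal logging mode sketching the
# dynamic-scope behavior described in the TorchDispatchMode docstring above. It only
# uses public torch APIs; the class name is made up for illustration.
#
#     class LoggingMode(BaseTorchDispatchMode):
#         def __torch_dispatch__(self, func, types, args=(), kwargs=None):
#             print(f"dispatching {func}")
#             return super().__torch_dispatch__(func, types, args, kwargs)
#
#     with LoggingMode():
#         torch.ones(2) + 1   # the factory call and the add are both intercepted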
# Subtypes which have __tensor_flatten__ and __tensor_unflatten__.
class TensorWithFlatten(Protocol):
def __tensor_flatten__(self) -> tuple[Sequence[str], object]:
...
@staticmethod
def __tensor_unflatten__(inner_tensors: int, flatten_spec: int, outer_size: int, outer_stride: int) -> torch.Tensor:
...
# It would be really nice to be able to say that the return of
# is_traceable_wrapper_subclass() is Intersection[torch.Tensor,
# TensorWithFlatten] - but that doesn't exist.
shape: torch._C.Size
@overload
def stride(self, dim: None = None) -> tuple[int, ...]:
...
@overload
def stride(self, dim: int) -> int:
...
@overload
def size(self, dim: None = None) -> tuple[int, ...]:
...
@overload
def size(self, dim: int) -> int:
...
def storage_offset(self) -> int:
...
def dim(self) -> int:
...
@overload
def to(
self,
dtype: torch.types._dtype,
non_blocking: bool = False,
copy: bool = False,
*,
memory_format: Optional[torch.memory_format] = None
) -> torch.Tensor:
...
@overload
def to(
self,
device: Optional["torch._prims_common.DeviceLikeType"] = None,
dtype: Optional[torch.types._dtype] = None,
non_blocking: bool = False,
copy: bool = False,
*,
memory_format: Optional[torch.memory_format] = None
) -> torch.Tensor:
...
@overload
def to(
self,
other: torch.Tensor,
non_blocking: bool = False,
copy: bool = False,
*,
memory_format: Optional[torch.memory_format] = None
) -> torch.Tensor:
...
def is_traceable_wrapper_subclass(t: object) -> TypeIs[TensorWithFlatten]:
"""
Returns whether or not a tensor subclass that implements __torch_dispatch__
is 'traceable' with torch.compile.
In order for a tensor subclass to support TorchDispatchMode-style tracing in PT2,
    it must implement two magic methods: __tensor_flatten__ and __tensor_unflatten__.
It is also expected to obey some restrictions around traceability and aliasing:
* The subclass's __torch_dispatch__() implementation should desugar into pytorch
dispatcher operations that can be traced into a graph.
* The subclass should use return_and_correct_aliasing(). This is needed today to make
sure that torch.compile does the right thing in a few cases around input mutation
and output aliasing.
Expected magic method signatures:
attrs, ctx = t.__tensor_flatten__()
attrs: list of attribute name strings for inner tensors
ctx: dict containing any other subclass-specific metadata needed for unflattening
t = MySubClass.__tensor_unflatten__(inner_tensors, ctx, outer_size, outer_stride)
inner_tensors: dict mapping attribute name -> tensor for each inner tensor
ctx: dict with subclass metadata in the form that __tensor_flatten__() produces
outer_size: expected (possibly symbolic) size that the returned subclass
instance should have. Note that this arg is useful for certain subclasses
that require the shape info to be constructed. In most cases, this arg can be
safely ignored.
outer_stride: expected (possibly symbolic) stride that the returned subclass
instance should have. Note that this arg is useful for certain subclasses
that require the stride info to be constructed. In most cases, this arg can be
safely ignored.
"""
is_subclass = isinstance(t, torch.Tensor) and type(t) != torch.Tensor
return (
is_subclass
and hasattr(t, "__tensor_flatten__")
and hasattr(t, "__tensor_unflatten__")
)
def is_traceable_wrapper_subclass_type(t: type) -> TypeIs[type[TensorWithFlatten]]:
"""Same as above, but takes a type argument instead of an instance."""
return (issubclass(t, torch.Tensor) and t != torch.Tensor
and hasattr(t, "__tensor_flatten__") and hasattr(t, "__tensor_unflatten__"))
def transform_subclass(t, callback, outer_size=None, outer_stride=None):
"""
Given a traceable, wrapper tensor subclass ``t`` that implements
``__torch_dispatch__`` and holds some inner tensors,
and a callback of type ``Callable[[str, torch.Tensor], torch.Tensor]``,
`transform_subclass` will construct a fresh instance of the wrapper tensor subclass.
It will do so by grabbing each inner tensor attribute from the wrapper,
passing them into ``callback`` to get a transformed tensor,
and putting each transformed tensor into the fresh tensor subclass instance.
Note: this function will not handle ensuring that the fresh subclass
gets the same (autograd, and aliasing) metadata as the original tensor.
This is generally handled in other subsystems like AOTAutograd.
"""
outer_size = outer_size if outer_size is not None else t.size()
outer_stride = outer_stride if outer_stride is not None else t.stride()
attrs, ctx = t.__tensor_flatten__()
transformed_tensors_dict = {}
for attr in attrs:
transformed_tensors_dict[attr] = callback(attr, getattr(t, attr))
sub = type(t).__tensor_unflatten__(
transformed_tensors_dict, ctx, outer_size, outer_stride
)
# NB: Purposefully guard here to simplify the inner / outer symbols.
# Using sym_eq() for symbolic comparison can result in an expression that's too
# difficult to guard on, so we use == here.
assert sub.shape == outer_size, (
f"Expected return value from {type(t)}__tensor_unflatten__() to have "
f"shape equal to {outer_size}, but got: {sub.shape}"
)
assert sub.stride() == outer_stride, (
f"Expected return value from {type(t)}__tensor_unflatten__() to have "
f"stride equal to {outer_stride}, but got: {sub.stride()}"
)
return sub
def _correct_storage_aliasing(func, schema_info, args, outs):
"""
Given: an OpOverload, a SchemaInfo (cached information from torchgen about schema),
and the inputs/outputs to the OpOverload,
this function checks to see if func is a view operator
(by checking if any of the outputs in the op's schema
are immutable aliases of inputs).
If so, this function manually aliases the storage of the output tensor
with its corresponding input tensor alias.
It does this by unsafely overwriting the storage field of the output tensor
to be the same storage as the input.
"""
assert isinstance(func, torch._ops.OpOverload)
assert isinstance(args, tuple)
assert isinstance(outs, (list, tuple))
def alias_non_inplace_storage(arg, ret):
# This is hopefully a reasonable assert:
# subclasses that rely on this API for output aliasing
# should always return wrapper tensor subclasses for us to manually alias.
# in theory if a subclass that needs this API wants to sometimes return
# plain tensors, we could remove the assert and just not perform the aliasing,
# but it seems safer to learn more about this case first.
if is_traceable_wrapper_subclass(arg) or is_traceable_wrapper_subclass(ret):
ret_list = ret if isinstance(ret, list) else [ret]
for r in ret_list:
assert type(arg) == type(
r
), f"""Called {str(func)} with input of type {type(arg)}
and output of type {type(ret)}. But expected types to match."""
# Need to call a non-dispatcher helper, because we explicitly do **not**
# want our subclass to intercept the set_() call.
# instead, our subclass should directly have its storage swapped out.
# we **explicitly** don't want to reset the sizes on ret, if the storage implies a size change.
# Why?
            # The purpose of this API is *not* to change the size/strides of our output - we assume it's already correct.
            # We just want to "fix up" the storage aliasing, without modifying our output's metadata.
# Example: out = inp.expand(inp.shape[0], inp.shape[0])
# This requires swapping the storage of out to be the same as inp,
            # but we do *not* want it to change the sizes/strides that were computed for out.
if isinstance(ret, list):
for r in ret:
torch._functionalize_unsafe_set(r, arg)
else:
assert isinstance(ret, torch.Tensor), f"type: {type(ret)}"
torch._functionalize_unsafe_set(ret, arg)
def is_read_only_alias_match(arg, ret):
shared_aliases = arg.alias_set & ret.alias_set
return len(shared_aliases) > 0 and not arg.is_write
num_args = len(func._schema.arguments)
num_returns = len(func._schema.returns)
for arg_idx in range(num_args):
for return_idx in range(num_returns):
if is_read_only_alias_match(
schema_info.args[arg_idx], schema_info.outs[return_idx]
):
alias_non_inplace_storage(args[arg_idx], outs[return_idx])
# This abstracts over the fact that in return_and_correct_aliasing,
# we sometimes use torchgen schema parsing (for aten ops, since torchscript's schema parsing is sometimes buggy),
# and sometimes use torchscript schema parsing (for custom ops, for which torchgen parsing is untested).
@dataclass
class AliasInfo:
alias_set: set[str]
is_write: bool
name: Optional[str]
@dataclass
class SchemaInfo:
args: list[AliasInfo]
outs: list[AliasInfo]
# Can't import torch._ops.OpOverload due to circular reference
parsed_schema_map: dict[Any, SchemaInfo] = {}
# Given an OpOverload, returns schema information on it.
# This is cached for efficiency, since it can involve running torchgen
def get_alias_info(func) -> SchemaInfo:
if func in parsed_schema_map:
return parsed_schema_map[func]
# For ATen ops: use torchgen (since torchscript parser doesn't handle alias annotations
# properly for some ops that output tensorlists)
if func.namespace == "aten":
torchgen_schema_str = str(func._schema)
assert torchgen_schema_str.startswith("aten::")
# remove the aten:: namespace, which is added by the torchscript parser,
# and torchgen doesn't know how to handle
torchgen_schema_str = torchgen_schema_str[6:]
import re
# the torchscript parser ends up converting int[2]=1 into int[2]=[1, 1],
# which torchgen chokes on.
torchgen_schema_str = re.sub(r"=\[[0, ]+\]", "=0", torchgen_schema_str)
torchgen_schema_str = re.sub(r"=\[[1, ]+\]", "=1", torchgen_schema_str)
        # for aten::rot90 / aten::fft_*
torchgen_schema_str = re.sub(r"=\[(-?[0-9]+), (-?[0-9]+)\]", r"=[\1,\2]", torchgen_schema_str)
torchgen_schema = torchgen.model.FunctionSchema.parse(torchgen_schema_str)
arg_schemas = [
AliasInfo(
alias_set=(
set() if a.annotation is None else set(a.annotation.alias_set)
),
is_write=a.annotation is not None and a.annotation.is_write,
name=a.name,
)
for a in torchgen_schema.arguments.flat_all
]
out_schemas = [
AliasInfo(
alias_set=(
set() if a.annotation is None else set(a.annotation.alias_set)
),
is_write=a.annotation is not None and a.annotation.is_write,
name=a.name,
)
for a in torchgen_schema.returns
]
else:
# For non-aten ops, torchgen is untested so we rely on torchscript schema parsing
arg_schemas = [
AliasInfo(
alias_set=(
set() if a.alias_info is None else set(a.alias_info.before_set)
),
is_write=a.alias_info is not None and a.alias_info.is_write,
name=a.name,
)
for a in func._schema.arguments
]
out_schemas = [
AliasInfo(
alias_set=(
set() if a.alias_info is None else set(a.alias_info.before_set)
),
is_write=a.alias_info is not None and a.alias_info.is_write,
name=a.name,
)
for a in func._schema.returns
]
schema_info = SchemaInfo(args=arg_schemas, outs=out_schemas)
parsed_schema_map[func] = schema_info
return schema_info
def return_and_correct_aliasing(func, args, kwargs, out):
"""
This function should be used by wrapper tensor ``__torch_dispatch__`` subclasses
that would like to work with torch.compile. It ensures that the subclass
properly implements the aliasing behavior of every op,
which is needed for correctness in AOTAutograd.
This function will handle:
* When we see a view op, we will alias the storages of any
input and output tensor subclasses
* When we see an inplace or out= op, we will directly
return the corresponding input tensor, instead of returning
a (potentially) fresh output tensor.
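    A typical call site (an illustrative sketch; ``unwrap_args``, ``unwrap_kwargs``,
    and ``wrap`` stand in for the subclass's own hypothetical helpers and are not
    part of this API) is the tail of a wrapper subclass's ``__torch_dispatch__``:
        out = func(*unwrap_args(args), **unwrap_kwargs(kwargs))
        return return_and_correct_aliasing(func, args, kwargs, wrap(out))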
"""
# Caching here because torchgen parsing is definitely not fast, and this function is called
# once for every op in the graph during functionalization.
schema_info = get_alias_info(func)
def get_write_alias(x):
if len(x.alias_set) == 0:
return None
alias_set = list(x.alias_set)
# torchscript allows for complicated alias sets, but our dispatcher ops only really involve simple aliasing
assert len(alias_set) == 1
if x.is_write:
return alias_set[0]
return None
def get_arg_from_alias(output_alias, schema_info, args, kwargs):
new_args, new_kwargs = torch.fx.operator_schemas.normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs
)
arg_indices = [
i for i, a in enumerate(schema_info.args) if output_alias in a.alias_set
]
# For any dispatcher op with an output alias, we expect it to map to exactly one alias in the schema's input arguments.
assert len(arg_indices) == 1
idx = arg_indices[0]
arg_info = schema_info.args[idx]
if arg_info.name is not None and arg_info.name in new_kwargs:
return new_kwargs[arg_info.name]
return new_args[idx]
# Fix up the storages of any outs so that they point to the same storage as the input,
# if func is a view op.
_correct_storage_aliasing(
func, schema_info, args, (out,) if not isinstance(out, tuple) else out
)
# For inplace_view ops in particular, we'll try hard to make sure that the wrapper subclass's
# metadata is set correctly.
if torch.Tag.inplace_view in func.tags:
# no_dispatch() to make sure that we secretly change the metadata on the wrapper,
# but don't end up dispatching the op anywhere else.
mutated_args = [
x
for i, x in enumerate(args)
if get_write_alias(schema_info.args[i]) is not None
]
# Assumption: we have a very small number of inplace_view ops that follow a strict schema:
# there is only a single argument that gets its metadata mutated.
assert len(mutated_args) == 1
# This check exists because we generally *do* want to update the metadata of any wrapper subclasses,
# but FunctionalTensor is special: it overrides all size/stride calls to plumb to the inner tensor.
# so we don't actually need to update the metadata (and attempting to do so causes errors)
from torch._subclasses.functional_tensor import FunctionalTensor
if not isinstance(mutated_args[0], FunctionalTensor):
with torch.utils._mode_utils.no_dispatch():
# See Note: [Fake Tensor Dispatch Keys]
# we're borrowing the way it modifies dispatch key TLS.
meta_in_tls = torch._C._meta_in_tls_dispatch_include()
torch._C._set_meta_in_tls_dispatch_include(True)
try:
func(*args, **kwargs)
finally:
torch._C._set_meta_in_tls_dispatch_include(meta_in_tls)
# Next: we need to make sure to return inputs directly, if the output is a mutable alias (e.g. add_()).
# simple case: none of our outputs have mutable aliases, so we can return the output as-is
if not any(get_write_alias(r) is not None for r in schema_info.outs):
return out
# simplifying assumption: we don't have **any** ops with return types like "-> (Tensor(a!), Tensor)"
if not all(get_write_alias(r) is not None for r in schema_info.outs):
raise RuntimeError("Unsupported schema: " + str(func._schema))
if len(func._schema.returns) == 1:
return get_arg_from_alias(
get_write_alias(schema_info.outs[0]), schema_info, args, kwargs
)
# In the multi-return case, all aten ops return a tuple / list, so cast accordingly.
outs_to_return = type(out)(
[
(
get_arg_from_alias(
get_write_alias(schema_info.outs[i]), schema_info, args, kwargs
)
if get_write_alias(r) is not None
else o
)
for ((i, r), o) in zip(enumerate(schema_info.outs), out)
]
)
return outs_to_return
```
|
==============================================================================================================
SOURCE CODE FILE: _pytree.py
LINES: 2
SIZE: 62.60 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_pytree.py
ENCODING: utf-8
```py
"""
Contains utility functions for working with nested Python data structures.
A *pytree* is a Python nested data structure. It is a tree in the sense that
nodes are Python collections (e.g., list, tuple, dict) and the leaves are
Python values. Furthermore, a pytree should not contain reference cycles.
pytrees are useful for working with nested collections of Tensors. For example,
one can use `tree_map` to map a function over all Tensors inside some nested
collection of Tensors and `tree_leaves` to get a flat list of all Tensors
inside some nested collection. pytrees are helpful for implementing nested
collection support for PyTorch APIs.
This pytree implementation is not very performant due to Python overhead.
To improve the performance we can move parts of the implementation to C++.
"""
import dataclasses
import functools
import importlib
import importlib.metadata
import json
import sys
import threading
import types
import warnings
from collections import defaultdict, deque, namedtuple, OrderedDict
from collections.abc import Hashable, Iterable, Mapping, Sequence
from enum import Enum
from typing import (
Any,
Callable,
cast,
Generic,
Optional,
overload,
Protocol,
TypeVar,
Union,
)
from typing_extensions import deprecated, NamedTuple
__all__ = [
"PyTree",
"Context",
"FlattenFunc",
"UnflattenFunc",
"DumpableContext",
"ToDumpableContextFn",
"FromDumpableContextFn",
"TreeSpec",
"LeafSpec",
"keystr",
"key_get",
"register_pytree_node",
"tree_flatten",
"tree_flatten_with_path",
"tree_unflatten",
"tree_iter",
"tree_leaves",
"tree_leaves_with_path",
"tree_structure",
"tree_map",
"tree_map_with_path",
"tree_map_",
"tree_map_only",
"tree_map_only_",
"tree_all",
"tree_any",
"tree_all_only",
"tree_any_only",
"treespec_dumps",
"treespec_loads",
"treespec_pprint",
]
T = TypeVar("T")
S = TypeVar("S")
U = TypeVar("U")
R = TypeVar("R")
DEFAULT_TREESPEC_SERIALIZATION_PROTOCOL = 1
NO_SERIALIZED_TYPE_NAME_FOUND = "NO_SERIALIZED_TYPE_NAME_FOUND"
class KeyEntry(Protocol):
def __hash__(self) -> int:
...
def __eq__(self, other: object) -> bool:
...
def __str__(self) -> str:
...
def get(self, parent: Any) -> Any:
...
class EnumEncoder(json.JSONEncoder):
def default(self, obj: object) -> str:
if isinstance(obj, Enum):
return obj.value # type: ignore[no-any-return]
return super().default(obj) # type: ignore[no-any-return]
Context = Any
PyTree = Any
FlattenFunc = Callable[[PyTree], tuple[list[Any], Context]]
UnflattenFunc = Callable[[Iterable[Any], Context], PyTree]
DumpableContext = Any # Any json dumpable text
ToDumpableContextFn = Callable[[Context], DumpableContext]
FromDumpableContextFn = Callable[[DumpableContext], Context]
ToStrFunc = Callable[["TreeSpec", list[str]], str]
MaybeFromStrFunc = Callable[[str], Optional[tuple[Any, Context, str]]]
KeyPath = tuple[KeyEntry, ...]
FlattenWithKeysFunc = Callable[[PyTree], tuple[list[tuple[KeyEntry, Any]], Any]]
# A NodeDef holds the following callables:
# - flatten_fn should take the collection and return a flat list of values.
# It can also return some context that is used in reconstructing the
# collection.
# - unflatten_fn should take a flat list of values and some context
# (returned by flatten_fn). It returns the collection by reconstructing
# it from the list and the context.
# - flatten_with_keys_fn, which is a callable that takes a
# pytree and returns a list of (keypath, value) pairs and a context.
class NodeDef(NamedTuple):
type: type[Any]
flatten_fn: FlattenFunc
unflatten_fn: UnflattenFunc
flatten_with_keys_fn: Optional[FlattenWithKeysFunc]
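# Illustrative sketch (not executed here): a flatten/unflatten pair that
# satisfies the contract above for a hypothetical two-field container ``Pair``:
#
#   def _pair_flatten(pair):                  # -> (children, context)
#       return [pair.first, pair.second], None
#
#   def _pair_unflatten(values, context):     # inverse of _pair_flatten
#       first, second = values
#       return Pair(first, second)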
_NODE_REGISTRY_LOCK = threading.RLock()
SUPPORTED_NODES: dict[type[Any], NodeDef] = {}
# _SerializeNodeDef holds the following:
# - typ: the type of the node (e.g., "Dict", "List", etc)
# - serialized_type_name: the fully qualified name of the type, e.g. "collections.OrderedDict"
# - to_dumpable_context takes the context of a TreeSpec and returns a
#   JSON-dumpable representation of that context
# - from_dumpable_context takes the JSON-dumpable representation of the
#   context and returns the deserialized context
class _SerializeNodeDef(NamedTuple):
typ: type[Any]
serialized_type_name: str
to_dumpable_context: Optional[ToDumpableContextFn]
from_dumpable_context: Optional[FromDumpableContextFn]
SUPPORTED_SERIALIZED_TYPES: dict[type[Any], _SerializeNodeDef] = {}
SERIALIZED_TYPE_TO_PYTHON_TYPE: dict[str, type[Any]] = {}
# NB: we try really hard to not import _cxx_pytree (which depends on optree)
# as much as possible. This is for isolation: a user who is not using C++ pytree
# shouldn't pay for it, and it helps make things like cpython upgrades easier.
try:
_optree_version = importlib.metadata.version("optree")
except importlib.metadata.PackageNotFoundError:
# No optree package found
_cxx_pytree_dynamo_traceable = _cxx_pytree_exists = False
else:
from torch._vendor.packaging.version import Version
# Keep this in sync with torch.utils._cxx_pytree!
if Version(_optree_version) < Version("0.13.0"):
# optree package less than our required minimum version.
# Pretend the optree package doesn't exist.
# NB: We will raise ImportError if the user directly tries to
# `import torch.utils._cxx_pytree` (look in that file for the check).
_cxx_pytree_dynamo_traceable = _cxx_pytree_exists = False
else:
_cxx_pytree_dynamo_traceable = _cxx_pytree_exists = True
_cxx_pytree_imported = False
_cxx_pytree_pending_imports: list[Any] = []
def register_pytree_node(
cls: type[Any],
flatten_fn: FlattenFunc,
unflatten_fn: UnflattenFunc,
*,
serialized_type_name: Optional[str] = None,
to_dumpable_context: Optional[ToDumpableContextFn] = None,
from_dumpable_context: Optional[FromDumpableContextFn] = None,
flatten_with_keys_fn: Optional[FlattenWithKeysFunc] = None,
) -> None:
"""Register a container-like type as pytree node.
Args:
cls: the type to register
flatten_fn: A callable that takes a pytree and returns a flattened
representation of the pytree and additional context to represent the
flattened pytree.
unflatten_fn: A callable that takes a flattened version of the pytree,
additional context, and returns an unflattened pytree.
serialized_type_name: A keyword argument used to specify the fully qualified
name used when serializing the tree spec.
to_dumpable_context: An optional keyword argument to custom specify how
to convert the context of the pytree to a custom json dumpable
representation. This is used for json serialization, which is being
used in torch.export right now.
from_dumpable_context: An optional keyword argument to custom specify how
to convert the custom json dumpable representation of the context
back to the original context. This is used for json deserialization,
which is being used in torch.export right now.
flatten_with_keys_fn: An optional keyword argument to specify how to
access each pytree leaf's keypath when flattening and tree-mapping.
Like ``flatten_fn``, but in place of a List[leaf], it should return
a List[(keypath, leaf)].
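    Example (a minimal sketch; the ``Pair`` class and the ``"mypkg.Pair"`` name
    are hypothetical and only used for illustration):
        >>> class Pair:
        ...     def __init__(self, first, second):
        ...         self.first, self.second = first, second
        >>> register_pytree_node(
        ...     Pair,
        ...     lambda p: ([p.first, p.second], None),
        ...     lambda values, _: Pair(*values),
        ...     serialized_type_name="mypkg.Pair",
        ... )
        >>> tree_leaves(Pair(1, (2, 3)))
        [1, 2, 3]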
"""
with _NODE_REGISTRY_LOCK:
if cls in SUPPORTED_NODES:
raise ValueError(f"{cls} is already registered as pytree node.")
_private_register_pytree_node(
cls,
flatten_fn,
unflatten_fn,
serialized_type_name=serialized_type_name,
to_dumpable_context=to_dumpable_context,
from_dumpable_context=from_dumpable_context,
flatten_with_keys_fn=flatten_with_keys_fn,
)
if not _cxx_pytree_exists:
return
if _cxx_pytree_imported:
from . import _cxx_pytree as cxx
cxx._private_register_pytree_node(
cls,
flatten_fn,
unflatten_fn,
serialized_type_name=serialized_type_name,
to_dumpable_context=to_dumpable_context,
from_dumpable_context=from_dumpable_context,
)
else:
args = (cls, flatten_fn, unflatten_fn)
kwargs = {
"serialized_type_name": serialized_type_name,
"to_dumpable_context": to_dumpable_context,
"from_dumpable_context": from_dumpable_context,
}
_cxx_pytree_pending_imports.append((args, kwargs))
def register_dataclass(cls: type[Any]) -> None:
"""Registers a ``dataclasses.dataclass`` type as a pytree node.
This is a simpler API than :func:`register_pytree_node` for registering
a dataclass.
Args:
cls: the dataclass type to register
Example:
>>> from torch import Tensor
>>> from dataclasses import dataclass
>>> import torch.utils._pytree as pytree
>>>
>>> @dataclass
>>> class Point:
>>> x: Tensor
>>> y: Tensor
>>>
>>> pytree.register_dataclass(Point)
>>>
>>> point = Point(torch.tensor(0), torch.tensor(1))
>>> point = pytree.tree_map(lambda x: x + 1, point)
>>> assert torch.allclose(point.x, torch.tensor(1))
>>> assert torch.allclose(point.y, torch.tensor(2))
"""
import torch.export
# Eventually we should move the export code here. It is not specific to export,
# aside from the serialization pieces.
torch.export.register_dataclass(cls)
CONSTANT_NODES: set[type] = set()
def register_constant(cls: type[Any]) -> None:
"""Registers a type as a pytree node with no leaves.
    In a :func:`torch.compile` region, if instances of these types get passed to a
    :func:`torch._dynamo.nonstrict_trace`-ed function, they are treated as a
constant (sometimes referred to as "static"):
1. if the instance object existed before the :func:`torch.compile` region,
we _assume_ no mutation will happen to it inside the :func:`torch.compile`
region, require that it has non-default `__eq__` and `__hash__` methods, and
we guard on the instance based on its `__eq__` method, i.e., if a new
instance fails to match any instances from the previous compilations,
:func:`torch.compile` will recompile the function using the new instance.
2. else if the instance object is created inside the :func:`torch.compile`
region, we currently don't support using it in a
:func:`torch._dynamo.nonstrict_trace`-ed function.
In general, if your class holds Tensors or dynamic int/float/bool (values that
may change from run-to-run of a function being compiled), then you probably
do not want to register it as a constant.
    Otherwise, if you want to pass an instance of a class to a
:func:`torch._dynamo.nonstrict_trace`-ed function, but you either can't use
:func:`register_pytree_node` on the class, or the class is "constant" enough
that you don't want to bother using :func:`register_pytree_node`, you should
consider using this function.
Args:
cls: the type to register as a constant. This type must be hashable.
Example:
>>> from dataclasses import dataclass
>>> import torch.utils._pytree as pytree
>>>
>>> @dataclass(frozen=True)
>>> class Config:
>>> norm: str
>>>
>>> pytree.register_constant(Config)
>>>
>>> config = Config("l2")
>>> values, spec = pytree.tree_flatten(config)
>>> assert len(values) == 0
"""
if cls.__eq__ is object.__eq__: # type: ignore[comparison-overlap]
raise TypeError(
"register_constant(cls) expects `cls` to have a non-default `__eq__` implementation."
)
    # A class with a custom `__eq__` but without `__hash__` won't inherit the default
# `__hash__` from object; see https://stackoverflow.com/a/1608907.
if cls.__hash__ is None: # type: ignore[comparison-overlap]
raise TypeError(
"register_constant(cls) expects `cls` to have a non-default `__hash__` implementation."
)
def _flatten(x): # type: ignore[no-untyped-def]
return [], ConstantNode(x)
def _unflatten(_, context): # type: ignore[no-untyped-def]
return context.value
def _flatten_with_keys(x): # type: ignore[no-untyped-def]
return [], ConstantNode(x)
with _NODE_REGISTRY_LOCK:
_private_register_pytree_node(
cls,
_flatten,
_unflatten,
flatten_with_keys_fn=_flatten_with_keys,
)
CONSTANT_NODES.add(cls)
def is_constant_class(cls: type[Any]) -> bool:
return isinstance(cls, type) and cls in CONSTANT_NODES
@dataclasses.dataclass(frozen=True)
class ConstantNode:
value: Any
def _is_constant_holder(spec: "TreeSpec") -> bool:
"""Checks if the spec is from a pytree registered with register_constant"""
return isinstance(spec.context, ConstantNode)
def _retrieve_constant(spec: "TreeSpec") -> Any:
"""Given a spec from a pytree registered with register_constant, retrieves the constant"""
assert _is_constant_holder(spec)
return tree_unflatten([], spec)
def _register_namedtuple(
cls: type[Any],
*,
serialized_type_name: str,
) -> None:
"""
Registers a namedtuple as a valid pytree node. By default namedtuples are
valid pytree nodes, but they are not serializable. This API provides the
argument `serialized_type_name` which allows these namedtuples to be
serialized.
Args:
        cls: the namedtuple type to register
        serialized_type_name: The serialized name for the namedtuple. This is
required if you want to serialize the pytree TreeSpec containing this
namedtuple.
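    Example (illustrative; ``Point2D`` and the serialized name are hypothetical):
        >>> from collections import namedtuple
        >>> Point2D = namedtuple("Point2D", ["x", "y"])
        >>> _register_namedtuple(Point2D, serialized_type_name="mypkg.Point2D")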
"""
_private_register_pytree_node(
cls,
_namedtuple_flatten,
_namedtuple_unflatten,
serialized_type_name=serialized_type_name,
to_dumpable_context=_namedtuple_serialize,
from_dumpable_context=_namedtuple_deserialize,
flatten_with_keys_fn=_namedtuple_flatten_with_keys,
)
@deprecated(
"`torch.utils._pytree._register_pytree_node` is deprecated. "
"Please use `torch.utils._pytree.register_pytree_node` instead.",
category=FutureWarning,
)
def _register_pytree_node(
cls: type[Any],
flatten_fn: FlattenFunc,
unflatten_fn: UnflattenFunc,
to_str_fn: Optional[ToStrFunc] = None, # deprecated
maybe_from_str_fn: Optional[MaybeFromStrFunc] = None, # deprecated
*,
serialized_type_name: Optional[str] = None,
to_dumpable_context: Optional[ToDumpableContextFn] = None,
from_dumpable_context: Optional[FromDumpableContextFn] = None,
flatten_with_keys_fn: Optional[FlattenWithKeysFunc] = None,
) -> None:
"""Register a container-like type as pytree node for the Python pytree only.
Args:
cls: the type to register
flatten_fn: A callable that takes a pytree and returns a flattened
representation of the pytree and additional context to represent the
flattened pytree.
unflatten_fn: A callable that takes a flattened version of the pytree,
additional context, and returns an unflattened pytree.
serialized_type_name: A keyword argument used to specify the fully qualified
name used when serializing the tree spec.
to_dumpable_context: An optional keyword argument to custom specify how
to convert the context of the pytree to a custom json dumpable
representation. This is used for json serialization, which is being
used in torch.export right now.
from_dumpable_context: An optional keyword argument to custom specify how
to convert the custom json dumpable representation of the context
back to the original context. This is used for json deserialization,
which is being used in torch.export right now.
flatten_with_keys_fn: An optional keyword argument to specify how to
access each pytree leaf's keypath when flattening and tree-mapping.
Like ``flatten_fn``, but in place of a List[leaf], it should return
a List[(keypath, leaf)].
"""
if to_str_fn is not None or maybe_from_str_fn is not None:
warnings.warn(
"`to_str_fn` and `maybe_from_str_fn` is deprecated. "
"Please use `to_dumpable_context` and `from_dumpable_context` instead.",
FutureWarning,
stacklevel=2,
)
_private_register_pytree_node(
cls,
flatten_fn,
unflatten_fn,
serialized_type_name=serialized_type_name,
to_dumpable_context=to_dumpable_context,
from_dumpable_context=from_dumpable_context,
flatten_with_keys_fn=flatten_with_keys_fn,
)
def _deregister_pytree_node(
cls: type[Any],
) -> None:
"""This is an internal function that is used to deregister a pytree node type
for the Python pytree only. This should be only used inside PyTorch.
"""
with _NODE_REGISTRY_LOCK:
del SUPPORTED_NODES[cls]
node_def = SUPPORTED_SERIALIZED_TYPES[cls]
del SERIALIZED_TYPE_TO_PYTHON_TYPE[node_def.serialized_type_name]
del SUPPORTED_SERIALIZED_TYPES[cls]
CONSTANT_NODES.discard(cls)
def _private_register_pytree_node(
cls: type[Any],
flatten_fn: FlattenFunc,
unflatten_fn: UnflattenFunc,
*,
serialized_type_name: Optional[str] = None,
to_dumpable_context: Optional[ToDumpableContextFn] = None,
from_dumpable_context: Optional[FromDumpableContextFn] = None,
flatten_with_keys_fn: Optional[FlattenWithKeysFunc] = None,
) -> None:
"""This is an internal function that is used to register a pytree node type
for the Python pytree only. End-users should use :func:`register_pytree_node`
instead.
"""
with _NODE_REGISTRY_LOCK:
if cls in SUPPORTED_NODES:
# TODO: change this warning to an error after OSS/internal stabilize
warnings.warn(
f"{cls} is already registered as pytree node. "
"Overwriting the previous registration.",
)
node_def = NodeDef(cls, flatten_fn, unflatten_fn, flatten_with_keys_fn)
SUPPORTED_NODES[cls] = node_def
if (to_dumpable_context is None) ^ (from_dumpable_context is None):
raise ValueError(
f"Both to_dumpable_context and from_dumpable_context for {cls} must "
"be None or registered."
)
if serialized_type_name is None:
serialized_type_name = NO_SERIALIZED_TYPE_NAME_FOUND
serialize_node_def = _SerializeNodeDef(
cls,
serialized_type_name,
to_dumpable_context,
from_dumpable_context,
)
SUPPORTED_SERIALIZED_TYPES[cls] = serialize_node_def
SERIALIZED_TYPE_TO_PYTHON_TYPE[serialized_type_name] = cls
@dataclasses.dataclass(frozen=True)
class SequenceKey(Generic[T]):
idx: int
def __str__(self) -> str:
return f"[{self.idx!r}]"
def get(self, sequence: Sequence[T]) -> T:
return sequence[self.idx]
K = TypeVar("K", bound=Hashable)
@dataclasses.dataclass(frozen=True)
class MappingKey(Generic[K, T]):
key: K
def __str__(self) -> str:
return f"[{self.key!r}]"
def get(self, mapping: Mapping[K, T]) -> T:
return mapping[self.key]
@dataclasses.dataclass(frozen=True)
class GetAttrKey:
name: str
def __str__(self) -> str:
return f".{self.name}"
def get(self, obj: Any) -> Any:
return getattr(obj, self.name)
def _tuple_flatten(d: tuple[T, ...]) -> tuple[list[T], Context]:
return list(d), None
def _tuple_flatten_with_keys(
d: tuple[T, ...]
) -> tuple[list[tuple[KeyEntry, T]], Context]:
values, context = _tuple_flatten(d)
return [(SequenceKey(i), v) for i, v in enumerate(values)], context
def _tuple_unflatten(values: Iterable[T], context: Context) -> tuple[T, ...]:
return tuple(values)
def _list_flatten(d: list[T]) -> tuple[list[T], Context]:
return d, None
def _list_flatten_with_keys(d: list[T]) -> tuple[list[tuple[KeyEntry, T]], Context]:
values, context = _list_flatten(d)
return [(SequenceKey(i), v) for i, v in enumerate(values)], context
def _list_unflatten(values: Iterable[T], context: Context) -> list[T]:
return list(values)
def _dict_flatten(d: dict[Any, T]) -> tuple[list[T], Context]:
return list(d.values()), list(d.keys())
def _dict_flatten_with_keys(
d: dict[Any, T]
) -> tuple[list[tuple[KeyEntry, T]], Context]:
values, context = _dict_flatten(d)
return [(MappingKey(k), v) for k, v in zip(context, values)], context
def _dict_unflatten(values: Iterable[T], context: Context) -> dict[Any, T]:
return dict(zip(context, values))
def _namedtuple_flatten(d: NamedTuple) -> tuple[list[Any], Context]:
return list(d), type(d)
def _namedtuple_flatten_with_keys(
d: NamedTuple,
) -> tuple[list[tuple[KeyEntry, Any]], Context]:
values, context = _namedtuple_flatten(d)
return (
[(GetAttrKey(field), v) for field, v in zip(context._fields, values)],
context,
)
def _namedtuple_unflatten(values: Iterable[T], context: Context) -> NamedTuple:
return cast(NamedTuple, context(*values))
def _namedtuple_serialize(context: Context) -> DumpableContext:
if context not in SUPPORTED_SERIALIZED_TYPES:
raise NotImplementedError(
f"Can't serialize TreeSpec of namedtuple class {context} because we "
"didn't register a serializated_type_name. Please register using "
"`_register_namedtuple`."
)
serialize_node_def = SUPPORTED_SERIALIZED_TYPES[context]
serialized_type_name = serialize_node_def.serialized_type_name
if serialized_type_name == NO_SERIALIZED_TYPE_NAME_FOUND:
raise NotImplementedError(
f"Can't serialize TreeSpec of namedtuple class {context} because we "
"couldn't find a serializated_type_name. Please register using "
"`_register_namedtuple`."
)
return serialized_type_name
def _namedtuple_deserialize(dumpable_context: DumpableContext) -> Context:
if dumpable_context not in SERIALIZED_TYPE_TO_PYTHON_TYPE:
raise NotImplementedError(
f"Can't deserialize TreeSpec of namedtuple class {dumpable_context} "
"because we couldn't find a serializated name."
)
typ = SERIALIZED_TYPE_TO_PYTHON_TYPE[dumpable_context]
return typ
def _ordereddict_flatten(d: OrderedDict[Any, T]) -> tuple[list[T], Context]:
return list(d.values()), list(d.keys())
def _ordereddict_flatten_with_keys(
d: OrderedDict[Any, T]
) -> tuple[list[tuple[KeyEntry, T]], Context]:
values, context = _ordereddict_flatten(d)
return [(MappingKey(k), v) for k, v in zip(context, values)], context
def _ordereddict_unflatten(
values: Iterable[T],
context: Context,
) -> OrderedDict[Any, T]:
return OrderedDict((key, value) for key, value in zip(context, values))
_odict_flatten = _ordereddict_flatten
_odict_unflatten = _ordereddict_unflatten
def _defaultdict_flatten(d: defaultdict[Any, T]) -> tuple[list[T], Context]:
values, dict_context = _dict_flatten(d)
return values, [d.default_factory, dict_context]
def _defaultdict_flatten_with_keys(
d: defaultdict[Any, T]
) -> tuple[list[tuple[KeyEntry, T]], Context]:
values, context = _defaultdict_flatten(d)
_, dict_context = context
return [(MappingKey(k), v) for k, v in zip(dict_context, values)], context
def _defaultdict_unflatten(
values: Iterable[T],
context: Context,
) -> defaultdict[Any, T]:
default_factory, dict_context = context
return defaultdict(default_factory, _dict_unflatten(values, dict_context))
def _defaultdict_serialize(context: Context) -> DumpableContext:
default_factory, dict_context = context
json_defaultdict = {
"default_factory_module": default_factory.__module__,
"default_factory_name": default_factory.__qualname__,
"dict_context": dict_context,
}
return json_defaultdict
def _defaultdict_deserialize(dumpable_context: DumpableContext) -> Context:
assert isinstance(dumpable_context, dict)
assert set(dumpable_context) == {
"default_factory_module",
"default_factory_name",
"dict_context",
}
default_factory_module = dumpable_context["default_factory_module"]
default_factory_name = dumpable_context["default_factory_name"]
assert isinstance(default_factory_module, str)
assert isinstance(default_factory_name, str)
module = importlib.import_module(default_factory_module)
default_factory = getattr(module, default_factory_name)
dict_context = dumpable_context["dict_context"]
return [default_factory, dict_context]
def _deque_flatten(d: deque[T]) -> tuple[list[T], Context]:
return list(d), d.maxlen
def _deque_flatten_with_keys(
d: deque[T],
) -> tuple[list[tuple[KeyEntry, T]], Context]:
values, context = _deque_flatten(d)
return [(SequenceKey(i), v) for i, v in enumerate(values)], context
def _deque_unflatten(values: Iterable[T], context: Context) -> deque[T]:
return deque(values, maxlen=context)
_private_register_pytree_node(
tuple,
_tuple_flatten,
_tuple_unflatten,
serialized_type_name="builtins.tuple",
flatten_with_keys_fn=_tuple_flatten_with_keys,
)
_private_register_pytree_node(
list,
_list_flatten,
_list_unflatten,
serialized_type_name="builtins.list",
flatten_with_keys_fn=_list_flatten_with_keys,
)
_private_register_pytree_node(
dict,
_dict_flatten,
_dict_unflatten,
serialized_type_name="builtins.dict",
flatten_with_keys_fn=_dict_flatten_with_keys,
)
_private_register_pytree_node(
namedtuple, # type: ignore[arg-type]
_namedtuple_flatten,
_namedtuple_unflatten,
serialized_type_name="collections.namedtuple",
to_dumpable_context=_namedtuple_serialize,
from_dumpable_context=_namedtuple_deserialize,
flatten_with_keys_fn=_namedtuple_flatten_with_keys,
)
_private_register_pytree_node(
OrderedDict,
_ordereddict_flatten,
_ordereddict_unflatten,
serialized_type_name="collections.OrderedDict",
flatten_with_keys_fn=_ordereddict_flatten_with_keys,
)
_private_register_pytree_node(
defaultdict,
_defaultdict_flatten,
_defaultdict_unflatten,
serialized_type_name="collections.defaultdict",
to_dumpable_context=_defaultdict_serialize,
from_dumpable_context=_defaultdict_deserialize,
flatten_with_keys_fn=_defaultdict_flatten_with_keys,
)
_private_register_pytree_node(
deque,
_deque_flatten,
_deque_unflatten,
serialized_type_name="collections.deque",
flatten_with_keys_fn=_deque_flatten_with_keys,
)
STANDARD_DICT_TYPES: frozenset[type] = frozenset(
{dict, OrderedDict, defaultdict},
)
BUILTIN_TYPES: frozenset[type] = frozenset(
{tuple, list, dict, namedtuple, OrderedDict, defaultdict, deque}, # type: ignore[arg-type]
)
# h/t https://stackoverflow.com/questions/2166818/how-to-check-if-an-object-is-an-instance-of-a-namedtuple
def _is_namedtuple_instance(tree: Any) -> bool:
typ = type(tree)
bases = typ.__bases__
if len(bases) != 1 or bases[0] != tuple:
return False
fields = getattr(typ, "_fields", None)
if not isinstance(fields, tuple):
return False
return all(type(entry) == str for entry in fields)
def _get_node_type(tree: Any) -> Any:
if _is_namedtuple_instance(tree):
return namedtuple
return type(tree)
# A leaf is defined as anything that is not a Node.
def _is_leaf(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]] = None) -> bool:
return (is_leaf is not None and is_leaf(tree)) or _get_node_type(
tree
) not in SUPPORTED_NODES
# A TreeSpec represents the structure of a pytree. It holds:
# "type": the type of root Node of the pytree
# context: some context that is useful in unflattening the pytree
# children_specs: specs for each child of the root Node
# num_leaves: the number of leaves
@dataclasses.dataclass(init=True, frozen=True, eq=True, repr=False)
class TreeSpec:
type: Any
context: Context
children_specs: list["TreeSpec"]
num_nodes: int = dataclasses.field(init=False)
num_leaves: int = dataclasses.field(init=False)
num_children: int = dataclasses.field(init=False)
def __post_init__(self) -> None:
num_nodes = sum((spec.num_nodes for spec in self.children_specs), start=1)
num_leaves = sum(spec.num_leaves for spec in self.children_specs)
num_children = len(self.children_specs)
object.__setattr__(self, "num_nodes", num_nodes)
object.__setattr__(self, "num_leaves", num_leaves)
object.__setattr__(self, "num_children", num_children)
def __repr__(self, indent: int = 0) -> str:
repr_prefix: str = f"TreeSpec({self.type.__name__}, {self.context}, ["
children_specs_str: str = ""
if self.num_children > 0:
indent += 2
children_specs_str += self.children_specs[0].__repr__(indent)
children_specs_str += "," if self.num_children > 1 else ""
children_specs_str += ",".join(
[
"\n" + " " * indent + child.__repr__(indent)
for child in self.children_specs[1:]
]
)
repr_suffix: str = f"{children_specs_str}])"
return repr_prefix + repr_suffix
def __eq__(self, other: PyTree) -> bool:
if self is other:
return True
elif other.__class__ is self.__class__:
if str(self.type) != str(other.type):
return False
if self.context != other.context:
return False
elif self.children_specs != other.children_specs:
return False
return True
return NotImplemented
def is_leaf(self) -> bool:
return self.num_nodes == 1 and self.num_leaves == 1
def flatten_up_to(self, tree: PyTree) -> list[PyTree]:
def helper(treespec: TreeSpec, tree: PyTree, subtrees: list[PyTree]) -> None:
if treespec.is_leaf():
subtrees.append(tree)
return
node_type = _get_node_type(tree)
if treespec.type not in BUILTIN_TYPES:
# Always require custom node types to match exactly
if node_type != treespec.type:
raise ValueError(
f"Type mismatch; "
f"expected {treespec.type!r}, but got {node_type!r}.",
)
flatten_fn = SUPPORTED_NODES[node_type].flatten_fn
children, context = flatten_fn(tree)
if len(children) != treespec.num_children:
raise ValueError(
f"Node arity mismatch; "
f"expected {treespec.num_children}, but got {len(children)}.",
)
if context != treespec.context:
raise ValueError(
f"Node context mismatch for custom node type {treespec.type!r}.",
)
else:
# For builtin dictionary types, we allow some flexibility
# Otherwise, we require exact matches
both_standard_dict = (
treespec.type in STANDARD_DICT_TYPES
and node_type in STANDARD_DICT_TYPES
)
if not both_standard_dict and node_type != treespec.type:
raise ValueError(
f"Node type mismatch; "
f"expected {treespec.type!r}, but got {node_type!r}.",
)
if len(tree) != treespec.num_children:
raise ValueError(
f"Node arity mismatch; "
f"expected {treespec.num_children}, but got {len(tree)}.",
)
if both_standard_dict:
# dictionary types are compatible with each other
dict_context = (
treespec.context
if treespec.type is not defaultdict
# ignore mismatch of `default_factory` for defaultdict
else treespec.context[1]
)
expected_keys = dict_context
got_key_set = set(tree)
expected_key_set = set(expected_keys)
if got_key_set != expected_key_set:
missing_keys = expected_key_set.difference(got_key_set)
extra_keys = got_key_set.difference(expected_key_set)
message = ""
if missing_keys:
message += f"; missing key(s): {missing_keys}"
if extra_keys:
message += f"; extra key(s): {extra_keys}"
raise ValueError(f"Node keys mismatch{message}.")
children = [tree[key] for key in expected_keys]
else:
# node_type is treespec.type
flatten_fn = SUPPORTED_NODES[node_type].flatten_fn
children, context = flatten_fn(tree)
if (
node_type is not deque # ignore mismatch of `maxlen` for deque
) and context != treespec.context:
raise ValueError(
f"Node context mismatch for node type {treespec.type!r}; "
f"expected {treespec.context!r}, but got {context!r}.", # namedtuple type mismatch
)
for subtree, subspec in zip(children, treespec.children_specs):
helper(subspec, subtree, subtrees)
subtrees: list[PyTree] = []
helper(self, tree, subtrees)
return subtrees
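    # Illustrative sketch (not executed here): flatten_up_to only descends as
    # deep as this spec, so values that the spec treats as leaves come back whole:
    #
    #   _, spec = tree_flatten({"a": 0, "b": 0})
    #   spec.flatten_up_to({"a": 1, "b": [2, 3]})   # -> [1, [2, 3]]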
def unflatten(self, leaves: Iterable[Any]) -> PyTree:
if not isinstance(leaves, (list, tuple)):
leaves = list(leaves)
if len(leaves) != self.num_leaves:
raise ValueError(
f"treespec.unflatten(leaves): `leaves` has length {len(leaves)} "
f"but the spec refers to a pytree that holds {self.num_leaves} "
f"items ({self}).",
)
if self.is_leaf():
return leaves[0]
unflatten_fn = SUPPORTED_NODES[self.type].unflatten_fn
# Recursively unflatten the children
start = 0
end = 0
child_pytrees = []
for child_spec in self.children_specs:
end += child_spec.num_leaves
child_pytrees.append(child_spec.unflatten(leaves[start:end]))
start = end
return unflatten_fn(child_pytrees, self.context)
# NOTE: subclassing a dataclass is subtle. In order to enable reasoning about
# this class with `dataclasses.fields`, etc., while having a simplified
# constructor that takes no argument, we wrap with `dataclass(init=True, ...)`
# again, with fields that have `init=False`.
@dataclasses.dataclass(init=True, frozen=True, eq=False, repr=False)
class LeafSpec(TreeSpec):
type: Any = dataclasses.field(default=None, init=False)
context: Context = dataclasses.field(default=None, init=False)
children_specs: list["TreeSpec"] = dataclasses.field(
default_factory=list, init=False
)
def __post_init__(self) -> None:
# Override `__post_init__` for `num_leaves` derivation.
object.__setattr__(self, "num_nodes", 1)
object.__setattr__(self, "num_leaves", 1)
object.__setattr__(self, "num_children", 0)
def __repr__(self, indent: int = 0) -> str:
return "*"
# All leaves are equivalent, so represent with a single object to save on
# object construction time
_LEAF_SPEC = LeafSpec()
def tree_flatten(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> tuple[list[Any], TreeSpec]:
"""Flattens a pytree into a list of values and a TreeSpec that can be used
to reconstruct the pytree.
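    A minimal round-trip sketch:
    >>> leaves, spec = tree_flatten({'a': 1, 'b': (2, 3)})
    >>> leaves
    [1, 2, 3]
    >>> tree_unflatten(leaves, spec)
    {'a': 1, 'b': (2, 3)}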
"""
def helper(node: PyTree, leaves: list[Any]) -> TreeSpec:
if _is_leaf(node, is_leaf=is_leaf):
leaves.append(node)
return _LEAF_SPEC
node_type = _get_node_type(node)
flatten_fn = SUPPORTED_NODES[node_type].flatten_fn
children, context = flatten_fn(node)
# Recursively flatten the children
subspecs = [helper(child, leaves) for child in children]
return TreeSpec(node_type, context, subspecs)
leaves: list[Any] = []
treespec = helper(tree, leaves)
return leaves, treespec
def tree_unflatten(leaves: Iterable[Any], treespec: TreeSpec) -> PyTree:
"""Given a list of values and a TreeSpec, builds a pytree.
This is the inverse operation of `tree_flatten`.
"""
if not isinstance(treespec, TreeSpec):
raise TypeError(
f"tree_unflatten(leaves, treespec): Expected `treespec` to be "
f"instance of TreeSpec but got item of type {type(treespec)}.",
)
return treespec.unflatten(leaves)
def tree_iter(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> Iterable[Any]:
"""Get an iterator over the leaves of a pytree."""
if _is_leaf(tree, is_leaf=is_leaf):
yield tree
else:
node_type = _get_node_type(tree)
flatten_fn = SUPPORTED_NODES[node_type].flatten_fn
child_pytrees, _ = flatten_fn(tree)
# Recursively flatten the children
for child in child_pytrees:
yield from tree_iter(child, is_leaf=is_leaf)
def tree_leaves(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> list[Any]:
"""Get a list of leaves of a pytree."""
return list(tree_iter(tree, is_leaf=is_leaf))
def tree_structure(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> TreeSpec:
"""Get the TreeSpec for a pytree."""
return tree_flatten(tree, is_leaf=is_leaf)[1]
def tree_map(
func: Callable[..., Any],
tree: PyTree,
*rests: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
"""Map a multi-input function over pytree args to produce a new pytree.
See also :func:`tree_map_`.
>>> tree_map(lambda x: x + 1, {'x': 7, 'y': (42, 64)})
{'x': 8, 'y': (43, 65)}
>>> tree_map(lambda x: x is None, {'x': 7, 'y': (42, 64), 'z': None})
{'x': False, 'y': (False, False), 'z': True}
If multiple inputs are given, the structure of the tree is taken from the first input;
subsequent inputs need only have ``tree`` as a prefix:
>>> tree_map(lambda x, y: [x] + y, [5, 6], [[7, 9], [1, 2]])
[[5, 7, 9], [6, 1, 2]]
Args:
func (callable): A function that takes ``1 + len(rests)`` arguments, to be applied at the
corresponding leaves of the pytrees.
tree (pytree): A pytree to be mapped over, with each leaf providing the first positional
argument to function ``func``.
rests (tuple of pytree): A tuple of pytrees, each of which has the same structure as
``tree`` or has ``tree`` as a prefix.
is_leaf (callable, optional): An extra leaf predicate function that will be called at each
flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
            as a leaf. Otherwise, the default pytree registry will be used to determine whether a node
            is a leaf or not. If the function is not specified, the default pytree registry will be used.
Returns:
A new pytree with the same structure as ``tree`` but with the value at each leaf given by
``func(x, *xs)`` where ``x`` is the value at the corresponding leaf in ``tree`` and ``xs``
is the tuple of values at corresponding nodes in ``rests``.
"""
leaves, treespec = tree_flatten(tree, is_leaf=is_leaf)
flat_args = [leaves] + [treespec.flatten_up_to(r) for r in rests]
return treespec.unflatten(map(func, *flat_args))
def tree_map_(
func: Callable[..., Any],
tree: PyTree,
*rests: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
"""Like :func:`tree_map`, but do an inplace call on each leaf and return the original tree.
See also :func:`tree_map`.
Args:
func (callable): A function that takes ``1 + len(rests)`` arguments, to be applied at the
corresponding leaves of the pytrees.
tree (pytree): A pytree to be mapped over, with each leaf providing the first positional
argument to function ``func``.
rests (tuple of pytree): A tuple of pytrees, each of which has the same structure as
``tree`` or has ``tree`` as a prefix.
is_leaf (callable, optional): An extra leaf predicate function that will be called at each
flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
            as a leaf. Otherwise, the default pytree registry will be used to determine whether a node
            is a leaf or not. If the function is not specified, the default pytree registry will be used.
Returns:
        The original ``tree`` with the value at each leaf given by the side effect of function
        ``func(x, *xs)`` (not the return value), where ``x`` is the value at the corresponding leaf
        in ``tree`` and ``xs`` is the tuple of values at corresponding nodes in ``rests``.
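    A small sketch that collects leaves by side effect:
    >>> seen = []
    >>> tree_map_(seen.append, {'x': 7, 'y': (42, 64)})
    {'x': 7, 'y': (42, 64)}
    >>> seen
    [7, 42, 64]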
"""
leaves, treespec = tree_flatten(tree, is_leaf=is_leaf)
flat_args = [leaves] + [treespec.flatten_up_to(r) for r in rests]
deque(map(func, *flat_args), maxlen=0) # consume and exhaust the iterable
return tree
Type2 = tuple[type[T], type[S]]
Type3 = tuple[type[T], type[S], type[U]]
if sys.version_info >= (3, 10):
TypeAny = Union[type[Any], tuple[type[Any], ...], types.UnionType]
else:
TypeAny = Union[type[Any], tuple[type[Any], ...]]
Fn2 = Callable[[Union[T, S]], R]
Fn3 = Callable[[Union[T, S, U]], R]
Fn = Callable[[T], R]
FnAny = Callable[[Any], R]
MapOnlyFn = Callable[[T], Callable[[Any], Any]]
# These specializations help with type inference on the lambda passed to this
# function
@overload
def map_only(type_or_types_or_pred: type[T], /) -> MapOnlyFn[Fn[T, Any]]:
...
@overload
def map_only(type_or_types_or_pred: Type2[T, S], /) -> MapOnlyFn[Fn2[T, S, Any]]:
...
@overload
def map_only(type_or_types_or_pred: Type3[T, S, U], /) -> MapOnlyFn[Fn3[T, S, U, Any]]:
...
# This specialization is needed for the implementations below that call
@overload
def map_only(type_or_types_or_pred: TypeAny, /) -> MapOnlyFn[FnAny[Any]]:
...
@overload
def map_only(type_or_types_or_pred: Callable[[Any], bool], /) -> MapOnlyFn[FnAny[Any]]:
...
def map_only(
type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]], /
) -> MapOnlyFn[FnAny[Any]]:
"""
Suppose you are writing a tree_map over tensors, leaving everything
else unchanged. Ordinarily you would have to write:
def go(t):
if isinstance(t, Tensor):
return ...
else:
return t
With this function, you only need to write:
@map_only(Tensor)
def go(t):
return ...
You can also directly use 'tree_map_only'
"""
if isinstance(type_or_types_or_pred, (type, tuple)) or (
sys.version_info >= (3, 10)
and isinstance(type_or_types_or_pred, types.UnionType)
):
def pred(x: Any) -> bool:
return isinstance(x, type_or_types_or_pred) # type: ignore[arg-type]
elif callable(type_or_types_or_pred):
pred = type_or_types_or_pred # type: ignore[assignment]
else:
raise TypeError("Argument must be a type, a tuple of types, or a callable.")
def wrapper(func: Callable[[T], Any]) -> Callable[[Any], Any]:
@functools.wraps(func)
def wrapped(x: T) -> Any:
if pred(x):
return func(x)
return x
return wrapped
return wrapper
@overload
def tree_map_only(
type_or_types_or_pred: type[T],
/,
func: Fn[T, Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
@overload
def tree_map_only(
type_or_types_or_pred: Type2[T, S],
/,
func: Fn2[T, S, Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
@overload
def tree_map_only(
type_or_types_or_pred: Type3[T, S, U],
/,
func: Fn3[T, S, U, Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
@overload
def tree_map_only(
type_or_types_or_pred: TypeAny,
/,
func: FnAny[Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
@overload
def tree_map_only(
type_or_types_or_pred: Callable[[Any], bool],
/,
func: FnAny[Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
def tree_map_only(
type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]],
/,
func: FnAny[Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
return tree_map(map_only(type_or_types_or_pred)(func), tree, is_leaf=is_leaf)
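# Illustrative sketch (not executed here): tree_map_only applies ``func`` only to
# leaves that match the given type (or predicate) and leaves the rest untouched:
#
#   tree_map_only(int, lambda x: x + 1, {"a": 1, "b": "keep"})
#   # -> {"a": 2, "b": "keep"}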
@overload
def tree_map_only_(
type_or_types_or_pred: type[T],
/,
func: Fn[T, Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
@overload
def tree_map_only_(
type_or_types_or_pred: Type2[T, S],
/,
func: Fn2[T, S, Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
@overload
def tree_map_only_(
type_or_types_or_pred: Type3[T, S, U],
/,
func: Fn3[T, S, U, Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
@overload
def tree_map_only_(
type_or_types_or_pred: TypeAny,
/,
func: FnAny[Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
@overload
def tree_map_only_(
type_or_types_or_pred: Callable[[Any], bool],
/,
func: FnAny[Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
...
def tree_map_only_(
type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]],
/,
func: FnAny[Any],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
return tree_map_(map_only(type_or_types_or_pred)(func), tree, is_leaf=is_leaf)
def tree_all(
pred: Callable[[Any], bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
flat_args = tree_iter(tree, is_leaf=is_leaf)
return all(map(pred, flat_args))
def tree_any(
pred: Callable[[Any], bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
flat_args = tree_iter(tree, is_leaf=is_leaf)
return any(map(pred, flat_args))
@overload
def tree_all_only(
type_or_types: type[T],
/,
pred: Fn[T, bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
...
@overload
def tree_all_only(
type_or_types: Type2[T, S],
/,
pred: Fn2[T, S, bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
...
@overload
def tree_all_only(
type_or_types: Type3[T, S, U],
/,
pred: Fn3[T, S, U, bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
...
def tree_all_only(
type_or_types: TypeAny,
/,
pred: FnAny[bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
flat_args = tree_iter(tree, is_leaf=is_leaf)
return all(pred(x) for x in flat_args if isinstance(x, type_or_types))
@overload
def tree_any_only(
type_or_types: type[T],
/,
pred: Fn[T, bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
...
@overload
def tree_any_only(
type_or_types: Type2[T, S],
/,
pred: Fn2[T, S, bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
...
@overload
def tree_any_only(
type_or_types: Type3[T, S, U],
/,
pred: Fn3[T, S, U, bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
...
def tree_any_only(
type_or_types: TypeAny,
/,
pred: FnAny[bool],
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
flat_args = tree_iter(tree, is_leaf=is_leaf)
return any(pred(x) for x in flat_args if isinstance(x, type_or_types))
# Broadcasts a pytree to the provided TreeSpec and returns the flattened
# values. If this is not possible, then this function returns None.
#
# For example, given pytree=0 and spec=TreeSpec(list, None, [LeafSpec(), LeafSpec()]),
# this function would return [0, 0]. This is useful for part of the vmap implementation:
# a user can pass in vmap(fn, in_dims)(*inputs). `in_dims` should be
# broadcastable to the tree structure of `inputs` and we use
# _broadcast_to_and_flatten to check this.
def _broadcast_to_and_flatten(
tree: PyTree,
treespec: TreeSpec,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> Optional[list[Any]]:
assert isinstance(treespec, TreeSpec)
if _is_leaf(tree, is_leaf=is_leaf):
return [tree] * treespec.num_leaves
if treespec.is_leaf():
return None
node_type = _get_node_type(tree)
if node_type != treespec.type:
return None
flatten_fn = SUPPORTED_NODES[node_type].flatten_fn
child_pytrees, ctx = flatten_fn(tree)
# Check if the Node is different from the spec
if len(child_pytrees) != treespec.num_children or ctx != treespec.context:
return None
# Recursively flatten the children
result: list[Any] = []
for child, child_spec in zip(child_pytrees, treespec.children_specs):
flat = _broadcast_to_and_flatten(child, child_spec, is_leaf=is_leaf)
if flat is not None:
result += flat
else:
return None
return result
@dataclasses.dataclass
class _TreeSpecSchema:
"""
_TreeSpecSchema is the schema used to serialize the TreeSpec
It contains the following fields:
- type: A string name of the type. null for the case of a LeafSpec.
- context: Any format which is json dumpable
- children_spec: A list of children serialized specs.
"""
type: Optional[str]
context: DumpableContext
children_spec: list["_TreeSpecSchema"]
class _ProtocolFn(NamedTuple):
treespec_to_json: Callable[[TreeSpec], DumpableContext]
json_to_treespec: Callable[[DumpableContext], TreeSpec]
_SUPPORTED_PROTOCOLS: dict[int, _ProtocolFn] = {}
def _treespec_to_json(treespec: TreeSpec) -> _TreeSpecSchema:
if treespec.is_leaf():
return _TreeSpecSchema(None, None, [])
if treespec.type not in SUPPORTED_SERIALIZED_TYPES:
raise NotImplementedError(
f"Serializing {treespec.type} in pytree is not registered.",
)
serialize_node_def = SUPPORTED_SERIALIZED_TYPES[treespec.type]
serialized_type_name = serialize_node_def.serialized_type_name
if serialized_type_name == NO_SERIALIZED_TYPE_NAME_FOUND:
raise NotImplementedError(
f"No registered serialization name for {treespec.type} found. "
"Please update your _register_pytree_node call with a `serialized_type_name` kwarg."
)
if serialize_node_def.to_dumpable_context is None:
try:
serialized_context = json.dumps(treespec.context, cls=EnumEncoder)
except TypeError as e:
raise TypeError(
"Unable to serialize context. "
"Please make the context json dump-able, or register a "
"custom serializer using _register_pytree_node."
) from e
else:
serialized_context = serialize_node_def.to_dumpable_context(treespec.context)
child_schemas = [_treespec_to_json(child) for child in treespec.children_specs]
return _TreeSpecSchema(serialized_type_name, serialized_context, child_schemas)
def _json_to_treespec(json_schema: DumpableContext) -> TreeSpec:
if (
json_schema["type"] is None
and json_schema["context"] is None
and len(json_schema["children_spec"]) == 0
):
return _LEAF_SPEC
if json_schema["type"] not in SERIALIZED_TYPE_TO_PYTHON_TYPE:
raise NotImplementedError(
f'Deserializing {json_schema["type"]} in pytree is not registered.',
)
typ = SERIALIZED_TYPE_TO_PYTHON_TYPE[json_schema["type"]]
serialize_node_def = SUPPORTED_SERIALIZED_TYPES[typ]
if serialize_node_def.from_dumpable_context is None:
try:
context = json.loads(json_schema["context"])
except TypeError as ex:
raise TypeError(
"Unable to deserialize context. "
"Please make the context json load-able, or register a "
"custom serializer using _register_pytree_node.",
) from ex
else:
context = serialize_node_def.from_dumpable_context(json_schema["context"])
children_specs = [
_json_to_treespec(child_string) for child_string in json_schema["children_spec"]
]
return TreeSpec(typ, context, children_specs)
_SUPPORTED_PROTOCOLS[1] = _ProtocolFn(_treespec_to_json, _json_to_treespec)
def treespec_dumps(treespec: TreeSpec, protocol: Optional[int] = None) -> str:
if not isinstance(treespec, TreeSpec):
raise TypeError(
f"treespec_dumps(treespec, protocol): Expected `treespec` to be instance of "
f"TreeSpec but got item of type {type(treespec)}.",
)
if protocol is None:
protocol = DEFAULT_TREESPEC_SERIALIZATION_PROTOCOL
if protocol in _SUPPORTED_PROTOCOLS:
json_spec = _SUPPORTED_PROTOCOLS[protocol].treespec_to_json(treespec)
else:
raise ValueError(
f"Unknown protocol {protocol}. "
f"Available protocols: {list(_SUPPORTED_PROTOCOLS.keys())}",
)
str_spec = json.dumps((protocol, dataclasses.asdict(json_spec)), cls=EnumEncoder)
return str_spec
@functools.lru_cache
def treespec_loads(serialized: str) -> TreeSpec:
protocol, json_schema = json.loads(serialized)
if protocol in _SUPPORTED_PROTOCOLS:
return _SUPPORTED_PROTOCOLS[protocol].json_to_treespec(json_schema)
raise ValueError(
f"Unknown protocol {protocol}. "
f"Available protocols: {list(_SUPPORTED_PROTOCOLS.keys())}",
)
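# Illustrative sketch (not executed here): a TreeSpec round-trips through the
# JSON protocol registered above.
#
#   _, spec = tree_flatten({"a": (1, 2)})
#   serialized = treespec_dumps(spec)      # a JSON string: [protocol, schema]
#   assert treespec_loads(serialized) == spec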
class _DummyLeaf:
def __repr__(self) -> str:
return "*"
def treespec_pprint(treespec: TreeSpec) -> str:
dummy_tree = tree_unflatten(
[_DummyLeaf() for _ in range(treespec.num_leaves)],
treespec,
)
return repr(dummy_tree)
# TODO(angelayi): remove this function after OSS/internal stabilize
@deprecated(
"`pytree_to_str` is deprecated. Please use `treespec_dumps` instead.",
category=FutureWarning,
)
def pytree_to_str(treespec: TreeSpec) -> str:
return treespec_dumps(treespec)
# TODO(angelayi): remove this function after OSS/internal stabilize
@deprecated(
"`str_to_pytree` is deprecated. Please use `treespec_loads` instead.",
category=FutureWarning,
)
def str_to_pytree(json: str) -> TreeSpec:
return treespec_loads(json)
def arg_tree_leaves(*args: PyTree, **kwargs: PyTree) -> list[Any]:
"""Get a flat list of arguments to this function
A slightly faster version of tree_leaves((args, kwargs))
"""
leaves: list[Any] = []
for a in args:
leaves.extend(tree_iter(a))
for a in kwargs.values():
leaves.extend(tree_iter(a))
return leaves
def tree_flatten_with_path(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> tuple[list[tuple[KeyPath, Any]], TreeSpec]:
"""Flattens a pytree like :func:`tree_flatten`, but also returns each leaf's key path.
Args:
tree: a pytree to flatten. If it contains a custom type, that type must be
            registered with an appropriate `flatten_with_keys_fn` when registered
with :func:`register_pytree_node`.
is_leaf: An extra leaf predicate function that will be called at each
flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
            as a leaf. Otherwise, the default pytree registry will be used to determine whether a node
            is a leaf or not. If the function is not specified, the default pytree registry will be used.
Returns:
A tuple where the first element is a list of (key path, leaf) pairs, and the
second element is a :class:`TreeSpec` representing the structure of the flattened
tree.
"""
_, treespec = tree_flatten(tree, is_leaf)
return list(_generate_key_paths((), tree, is_leaf)), treespec
def tree_leaves_with_path(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> list[tuple[KeyPath, Any]]:
"""Gets the leaves of a pytree like ``tree_leaves`` and returns each leaf's key path.
Args:
tree: a pytree. If it contains a custom type, that type must be
            registered with an appropriate `flatten_with_keys_fn` when registered
with :func:`register_pytree_node`.
is_leaf: An extra leaf predicate function that will be called at each
flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
            as a leaf. Otherwise, the default pytree registry will be used to determine whether a node
            is a leaf or not. If the function is not specified, the default pytree registry will be used.
Returns:
A list of (key path, leaf) pairs.
"""
return list(_generate_key_paths((), tree, is_leaf))
def _generate_key_paths(
key_path: KeyPath,
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> Iterable[tuple[KeyPath, Any]]:
if is_leaf and is_leaf(tree):
yield key_path, tree
return
node_type = _get_node_type(tree)
handler = SUPPORTED_NODES.get(node_type)
if not handler:
# This is a leaf
yield key_path, tree
return
flatten_with_keys = handler.flatten_with_keys_fn
if flatten_with_keys:
key_children, _ = flatten_with_keys(tree)
for k, c in key_children:
yield from _generate_key_paths((*key_path, k), c, is_leaf)
else:
# We registered this pytree but didn't add a flatten_with_keys_fn, complain.
raise ValueError(
f"Did not find a flatten_with_keys_fn for type: {node_type}. "
"Please pass a flatten_with_keys_fn argument to register_pytree_node."
)
def tree_map_with_path(
func: Callable[..., Any],
tree: PyTree,
*rests: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> PyTree:
"""Like :func:`tree_map`, but the provided callable takes an additional key path argument.
Args:
func: A function that takes ``2 + len(rests)`` arguments, to be applied at the
corresponding leaves of the pytrees. The first positional argument
to ``func`` is the key path of the leaf in question. The second
positional argument is the value of the leaf.
tree: A pytree to be mapped over, with each leaf providing the first positional
argument to function ``func``.
rests: A tuple of pytrees, each of which has the same structure as
``tree`` or has ``tree`` as a prefix.
        is_leaf: An extra leaf predicate function that will be called at each
            flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be
            treated as a leaf. Otherwise, the default pytree registry will be used to
            determine whether a node is a leaf. If the function is not specified, the
            default pytree registry will be used.
    Returns:
A new pytree with the same structure as ``tree`` but with the value at each leaf given by
``func(keypath, x, *xs)`` where ``keypath`` is the key path at the
corresponding leaf in ``tree``, ``x`` is the value at that leaf, and
``xs`` is the tuple of values at corresponding nodes in ``rests``.
"""
keypath_leaves, treespec = tree_flatten_with_path(tree, is_leaf)
keypath_leaves = list(zip(*keypath_leaves))
all_keypath_leaves = keypath_leaves + [treespec.flatten_up_to(r) for r in rests]
return treespec.unflatten(func(*xs) for xs in zip(*all_keypath_leaves))
def keystr(kp: KeyPath) -> str:
"""Given a key path, return a pretty-printed representation."""
return "".join([str(k) for k in kp])
def key_get(obj: Any, kp: KeyPath) -> Any:
"""Given an object and a key path, return the value at the key path."""
for k in kp:
obj = k.get(obj)
return obj
```
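A minimal usage sketch of the key-path helpers defined above (tree_flatten_with_path, tree_map_with_path, keystr, key_get), assuming the module is importable as torch.utils._pytree; the tree contents are illustrative:
```py
# Usage sketch (illustrative): flatten with key paths, map with access to the
# path of each leaf, and read a single leaf back through its key path.
import torch.utils._pytree as pytree

tree = {"a": [1, 2], "b": {"c": 3}}

keypath_leaves, spec = pytree.tree_flatten_with_path(tree)
for kp, leaf in keypath_leaves:
    print(pytree.keystr(kp), leaf)  # e.g. "['a'][0] 1"

doubled = pytree.tree_map_with_path(lambda kp, x: x * 2, tree)
assert doubled == {"a": [2, 4], "b": {"c": 6}}

kp0, leaf0 = keypath_leaves[0]
assert pytree.key_get(tree, kp0) == leaf0
```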
|
=============================================================================================================
SOURCE CODE FILE: _stats.py
LINES: 1
SIZE: 1.02 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_stats.py
ENCODING: utf-8
```py
# NOTE! PLEASE KEEP THIS FILE *FREE* OF TORCH DEPS! IT SHOULD BE IMPORTABLE ANYWHERE.
# IF YOU FEEL AN OVERWHELMING URGE TO ADD A TORCH DEP, MAKE A TRAMPOLINE FILE A LA torch._dynamo.utils
# AND SCRUB AWAY TORCH NOTIONS THERE.
import collections
import functools
from typing import Callable, TypeVar
from collections import OrderedDict
from typing_extensions import ParamSpec
simple_call_counter: OrderedDict[str, int] = collections.OrderedDict()
_P = ParamSpec("_P")
_R = TypeVar("_R")
def count_label(label: str) -> None:
prev = simple_call_counter.setdefault(label, 0)
simple_call_counter[label] = prev + 1
def count(fn: Callable[_P, _R]) -> Callable[_P, _R]:
@functools.wraps(fn)
def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _R:
if fn.__qualname__ not in simple_call_counter:
simple_call_counter[fn.__qualname__] = 0
simple_call_counter[fn.__qualname__] = simple_call_counter[fn.__qualname__] + 1
return fn(*args, **kwargs)
return wrapper
```
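A minimal usage sketch for the counters above, assuming the module is importable as torch.utils._stats; the function and label names are illustrative:
```py
from torch.utils._stats import count, count_label, simple_call_counter

@count
def fake_op() -> None:  # illustrative function name
    pass

fake_op()
fake_op()
count_label("cache_miss")  # illustrative label

# Counters are keyed by qualified name / label, in first-seen order.
print(dict(simple_call_counter))  # e.g. {'fake_op': 2, 'cache_miss': 1}
```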
|
============================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_strobelight\__init__.py
ENCODING: utf-8
```py
```
|
=========================================================================================================================================
SOURCE CODE FILE: cli_function_profiler.py
LINES: 7
SIZE: 11.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_strobelight\cli_function_profiler.py
ENCODING: utf-8
```py
# mypy: disallow-untyped-defs
import functools
import logging
import os
import re
import subprocess
import time
from collections.abc import Sequence
from threading import Lock
from typing import Any, Callable, Optional, TypeVar
from typing_extensions import ParamSpec
logger = logging.getLogger("strobelight_function_profiler")
console_handler = logging.StreamHandler()
formatter = logging.Formatter(
"%(name)s, line %(lineno)d, %(asctime)s, %(levelname)s: %(message)s"
)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
logger.setLevel(logging.INFO)
logger.propagate = False
_P = ParamSpec("_P")
_R = TypeVar("_R")
class StrobelightCLIProfilerError(Exception):
"""
Raised when an error happens during strobelight profiling
"""
def _pid_namespace_link(pid: Optional[int] = None) -> str:
"""Returns the link to the process's namespace, example: pid:[4026531836]"""
PID_NAMESPACE_PATH = "/proc/{}/ns/pid"
pid = pid or os.getpid()
return os.readlink(PID_NAMESPACE_PATH.format(pid))
def _pid_namespace(pid: Optional[int] = None) -> int:
"""Returns the process's namespace id"""
pid = pid or os.getpid()
link = _pid_namespace_link(pid)
return int(link[link.find("[") + 1 : -1])
def _command_to_string(command: Sequence[str]) -> str:
return " ".join(command)
class StrobelightCLIFunctionProfiler:
"""
Note: this is a meta only tool.
StrobelightCLIFunctionProfiler can be used to profile a python function and
generate a strobelight link with the results. It works on meta servers but
does not requries an fbcode target.
When stop_at_error is false(default), error during profiling does not prevent
the work function from running.
Check function_profiler_example.py for an example.
"""
# This lock is used to make sure only one thread is running the profiler at any point.
_lock = Lock()
def __init__(
self,
*,
stop_at_error: bool = False,
max_profile_duration_sec: int = 60 * 10,
sample_each: float = 1e7, # sample each sample_each cycles.
run_user_name: str = "pytorch-strobelight-ondemand",
timeout_wait_for_running_sec: int = 60,
timeout_wait_for_finished_sec: int = 60,
recorded_env_variables: Optional[list[str]] = None,
sample_tags: Optional[list[str]] = None,
stack_max_len: int = 127,
async_stack_max_len: int = 127,
):
self.stop_at_error = stop_at_error
self.max_profile_duration_sec = max_profile_duration_sec
self.sample_each = sample_each
self.run_user_name = run_user_name
self.timeout_wait_for_running_sec = timeout_wait_for_running_sec
self.timeout_wait_for_finished_sec = timeout_wait_for_finished_sec
# Results of the most recent run.
# Tracks the strobelight run id of the most recent run
self.current_run_id: Optional[int] = None
self.sample_tags = sample_tags
def _run_async(self) -> None:
processId = os.getpid()
namespace = _pid_namespace(processId)
command = [
"strobeclient",
"run",
"--profiler",
"pyperf",
"--event",
"cycles",
"--async",
"--sample-interval",
f"{int(self.sample_each)}",
"--duration-ms",
f"{int(self.max_profile_duration_sec * 1000)}",
"--pid",
f"{namespace}:{processId}",
]
if self.sample_tags:
command.append("--sample-tags")
command.append(",".join(self.sample_tags))
logger.debug("running command: %s", _command_to_string(command))
result = subprocess.run(command, capture_output=True)
output = result.stderr.decode("utf-8")
logger.debug("output:\n{%s}", output)
if result.returncode != 0:
raise StrobelightCLIProfilerError(
f"failed to start strobelight profiling, error in run_async:{output}"
)
if match := re.search(r"INFO Run Id: (-?\d+)", output):
self.current_run_id = int(match.group(1))
return
raise StrobelightCLIProfilerError(
f"failed to start strobelight profiling, unexpected result {output}"
)
def _wait_for_running(self, counter: int = 0) -> None:
if counter > 20:
raise StrobelightCLIProfilerError(
"wait_for_running called more than 20 times"
)
command = ["strobeclient", "getRunStatus", "--run-id", f"{self.current_run_id}"]
logger.debug("running command: %s", _command_to_string(command))
result = subprocess.run(command, capture_output=True)
output = result.stderr.decode("utf-8")
logger.debug("output:\n{%s}", output)
if result.returncode != 0:
raise StrobelightCLIProfilerError(
f"failed to start strobelight profiling, error in wait_for_running:{output}"
)
if match := re.search("Profile run status: (.*)", output):
current_status = match.group(1)
if current_status == "RUNNING":
return
elif current_status == "PREPARING":
time.sleep(10)
self._wait_for_running(counter + 1)
return
else:
raise StrobelightCLIProfilerError(f"unexpected {current_status} phase")
raise StrobelightCLIProfilerError(f"unexpected output\n: {output} ")
def _stop_run(self) -> None:
command = ["strobeclient", "stopRun", "--run-id", str(self.current_run_id)]
logger.debug("running command: %s", _command_to_string(command))
result = subprocess.run(command, capture_output=True)
output = result.stderr.decode("utf-8")
logger.debug("output:\n{%s}", output)
if result.returncode != 0:
raise StrobelightCLIProfilerError(
f"failed to stop strobelight profiling, return code is not 0 :{output}"
)
if match := re.search("INFO ::1:(.*)", output):
current_status = match.group(1)
if current_status.__contains__("Success!"):
return
else:
raise StrobelightCLIProfilerError(
f"failed to stop strobelight profiling, got {current_status} result"
)
raise StrobelightCLIProfilerError(f"unexpected output\n: {output} ")
def _get_results(self) -> None:
command = ["strobeclient", "getRunStatus", "--run-id", str(self.current_run_id)]
logger.debug("running command: %s", _command_to_string(command))
result = subprocess.run(command, capture_output=True)
output = result.stderr.decode("utf-8")
logger.debug("output:\n{%s}", output)
if result.returncode != 0:
raise StrobelightCLIProfilerError(
f"failed to extract profiling results, return code is not 0 : {output}"
)
if match := re.search("INFO ::1:(.*)", output):
current_status = match.group(1)
if current_status.__contains__("Profile run status: PROCESSING"):
time.sleep(10)
self._get_results()
return
            elif "Profile run finished with SUCCESS" not in current_status:
raise StrobelightCLIProfilerError(
f"failed to extract profiling results, unexpected response {output}"
)
for item in re.findall(
r"(Total samples(.*)|GraphProfiler(.*)|Icicle view \(python stack\)(.*))",
output,
):
logger.info(item[0])
def _stop_strobelight_no_throw(
self,
collect_results: bool,
) -> None:
try:
# call stop run
self._stop_run()
logger.info("strobelight profiling stopped")
logger.debug("collection stopped")
if not collect_results:
return
self._get_results()
except Exception:
logger.warning("error during stop_strobelight", exc_info=True)
# Return true if strobelight started and is running. Never throw.
def _start_strobelight(self) -> bool:
strobelight_started = False
try:
self._run_async()
strobelight_started = True
logger.info("strobelight run id is: %s", self.current_run_id)
self._wait_for_running()
logger.info("strobelight profiling running")
return True
except Exception:
logger.warning("error during start_strobelight:", exc_info=True)
if strobelight_started:
self._stop_strobelight_no_throw(collect_results=False)
return False
def profile(
self, work_function: Callable[_P, _R], *args: _P.args, **kwargs: _P.kwargs
) -> Optional[_R]:
self.current_run_id = None
        # Only one profiling run may be active at a time; try a non-blocking
        # acquire, and fall back to running the work function unprofiled.
        locked = StrobelightCLIFunctionProfiler._lock.acquire(False)
        if not locked:
            if self.stop_at_error:
                raise StrobelightCLIProfilerError("concurrent runs not supported")
            logger.warning("concurrent runs not supported")
            return work_function(*args, **kwargs)
        if locked:
started = self._start_strobelight()
if not started:
if self.stop_at_error:
StrobelightCLIFunctionProfiler._lock.release()
raise StrobelightCLIProfilerError(
"failed to start strobelight profiling"
)
result = work_function(*args, **kwargs)
StrobelightCLIFunctionProfiler._lock.release()
return result
try:
logger.debug("collection started")
result = work_function(*args, **kwargs)
self._stop_strobelight_no_throw(collect_results=True)
StrobelightCLIFunctionProfiler._lock.release()
return result
except Exception as error:
logger.warning("work function throw exception", exc_info=True)
self._stop_strobelight_no_throw(collect_results=False)
StrobelightCLIFunctionProfiler._lock.release()
raise error
return None
# A function decorator that wraps profile(); if no profiler is provided, one with
# default args is created. A function can be annotated as:
# @strobelight()
# @strobelight(profiler=StrobelightCLIFunctionProfiler(stop_at_error=True, ...))
# @strobelight(stop_at_error=True, ...)
def strobelight(
profiler: Optional[StrobelightCLIFunctionProfiler] = None, **kwargs: Any
) -> Callable[[Callable[_P, _R]], Callable[_P, Optional[_R]]]:
if not profiler:
profiler = StrobelightCLIFunctionProfiler(**kwargs)
def strobelight_inner(
work_function: Callable[_P, _R]
) -> Callable[_P, Optional[_R]]:
@functools.wraps(work_function)
def wrapper_function(*args: _P.args, **kwargs: _P.kwargs) -> Optional[_R]:
return profiler.profile(work_function, *args, **kwargs)
return wrapper_function
return strobelight_inner
```
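A usage sketch for the profiler above. Note the hedge: the profiler shells out to the strobeclient CLI, so this is only expected to work on Meta hosts where that tool is available; heavy_work and the sample tag are illustrative:
```py
from torch.utils._strobelight.cli_function_profiler import (
    StrobelightCLIFunctionProfiler,
    strobelight,
)

def heavy_work(n: int) -> int:  # illustrative workload
    return sum(i * i for i in range(n))

# Explicit profiler: profile() runs the function and returns its result,
# logging the Strobelight run id and result summary lines on success.
profiler = StrobelightCLIFunctionProfiler(stop_at_error=False)
result = profiler.profile(heavy_work, 1_000_000)

# Decorator form: keyword args are forwarded to StrobelightCLIFunctionProfiler.
@strobelight(sample_tags=["my_experiment"])
def decorated_work(n: int) -> int:
    return sum(range(n))

decorated_work(1_000_000)
```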
|
======================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_sympy\__init__.py
ENCODING: utf-8
```py
```
|
=======================================================================================================================
SOURCE CODE FILE: functions.py
LINES: 1
SIZE: 50.44 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_sympy\functions.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import functools
import math
import operator
import sys
from typing import Callable, Optional, SupportsFloat, TYPE_CHECKING, TypeVar, Union
from typing_extensions import TypeVarTuple, Unpack
import sympy
from sympy import S
from sympy.core import sympify
from sympy.core.expr import Expr
from sympy.core.function import Application
from sympy.core.logic import _torf, fuzzy_and, fuzzy_or
from sympy.core.numbers import equal_valued
from sympy.core.operations import LatticeOp, ShortCircuit
from sympy.core.sorting import ordered
from sympy.core.traversal import walk
from sympy.printing.precedence import PRECEDENCE
from sympy.utilities.iterables import sift
from .numbers import int_oo
if TYPE_CHECKING:
from collections.abc import Iterable
_T = TypeVar("_T", bound=SupportsFloat)
_Ts = TypeVarTuple("_Ts")
# Portions of this file are adapted from the Sympy codebase, which was
# licensed as follows:
#
# Copyright (c) 2006-2023 SymPy Development Team
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# a. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# b. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# c. Neither the name of SymPy nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
__all__ = [
"FloorDiv",
"ModularIndexing",
"Where",
"PythonMod",
"Mod",
"CleanDiv",
"CeilToInt",
"FloorToInt",
"CeilDiv",
"IntTrueDiv",
"FloatTrueDiv",
"LShift",
"RShift",
"IsNonOverlappingAndDenseIndicator",
"TruncToFloat",
"TruncToInt",
"RoundToInt",
"RoundDecimal",
"ToFloat",
"FloatPow",
"PowByNatural",
"Identity",
]
def _is_symbols_binary_summation(expr: sympy.Expr) -> bool:
    # No need to check that the two args are not the same, since expr is pre-optimized, but we do it anyway.
return (
expr.is_Add
and len(expr._args) == 2
and expr._args[0].is_symbol
and expr._args[1].is_symbol
and expr._args[0] is not expr._args[1]
)
def _keep_float(
f: Callable[[Unpack[_Ts]], _T]
) -> Callable[[Unpack[_Ts]], Union[_T, sympy.Float]]:
@functools.wraps(f)
def inner(*args: Unpack[_Ts]) -> Union[_T, sympy.Float]:
r: Union[_T, sympy.Float] = f(*args)
if any(isinstance(a, sympy.Float) for a in args) and not isinstance(
r, sympy.Float
):
r = sympy.Float(float(r))
return r
return inner
def fuzzy_eq(x: Optional[bool], y: Optional[bool]) -> Optional[bool]:
if None in (x, y):
return None
return x == y
def simple_floordiv_gcd(p: sympy.Basic, q: sympy.Basic) -> sympy.Basic:
"""
Fast path for sympy.gcd, using a simple factoring strategy.
We try to rewrite p and q in the form n*e*p1 + n*e*p2 and n*e*q0,
where n is the greatest common integer factor and e is the largest
syntactic common factor (i.e., common sub-expression) in p and q.
Then the gcd returned is n*e, cancelling which we would be left with
p1 + p2 and q0.
Note that further factoring of p1 + p2 and q0 might be possible with
sympy.factor (which uses domain-specific theories). E.g., we are unable
to find that x*y + x + y + 1 is divisible by x + 1. More generally,
when q is of the form q1 + q2 (instead of being already factored) it
might be necessary to fall back on sympy.gcd.
"""
def integer_coefficient(x: sympy.Basic) -> int:
integer_coefficients: list[int] = [
abs(int(arg))
for arg in sympy.Mul.make_args(x)
if isinstance(arg, (int, sympy.Integer))
]
return math.prod(integer_coefficients)
def integer_factor(expr: sympy.Basic) -> int:
integer_factors: Iterable[int] = map(
integer_coefficient, sympy.Add.make_args(expr)
)
return functools.reduce(math.gcd, integer_factors)
gcd: int = math.gcd(integer_factor(p), integer_factor(q))
p, q = p / gcd, q / gcd # type: ignore[operator, assignment] # remove in py3.12
base_splits: list[tuple[sympy.Basic, ...]] = list(
map(sympy.Mul.make_args, sympy.Add.make_args(p))
)
divisor_split: tuple[sympy.Basic, ...] = sympy.Mul.make_args(q)
for x in divisor_split:
if all(x in base_split for base_split in base_splits):
gcd = gcd * x # type: ignore[operator] # remove in py3.12
return gcd # type: ignore[return-value] # remove in py3.12
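# Example: simple_floordiv_gcd(6*x*y + 9*x, 3*x) returns 3*x, combining the
# integer factor 3 with the shared syntactic factor x; a case like
# (x*y + x + y + 1, x + 1) still needs sympy.gcd/sympy.factor, per the docstring.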
# It would be nice to have assertions on whether or not the inputs are is_integer.
# However, with bugs like https://github.com/sympy/sympy/issues/26620, sympy
# sometimes inconsistently reports floats as integers.
#
# What we can assume from sympy is that if something is an int, it
# definitely is is_integer, but if it is a float it may or may not
# be is_integer. So we are unable to do strong asserts that things
# are NOT integers.
# TODO: In Triton, // rounds to zero, but in Python, it is floor division.
# When we can prove both arguments are non-negative, we should just have a
# GenericFloorDiv (name pending) which can codegen efficiently in Python/C,
# and then PythonFloorDiv and CIntDiv which have the appropriate rounding
# semantics.
#
# Right now, FloorDiv de facto changes behavior if arguments are negative or
# not, this can potentially cause correctness issues.
class FloorDiv(sympy.Function):
"""
We maintain this so that:
1. We can use divisibility guards to simplify FloorDiv(a, b) to a / b.
2. Printing out the expression is nicer (compared to say, representing a//b as (a - a % b) / b)
    NB: This is Python-style floor division, rounding toward -Inf
"""
nargs: tuple[int, ...] = (2,)
precedence: int = 35 # lower precedence than add
is_integer: bool = True
@property
def base(self) -> sympy.Basic:
return self.args[0]
@property
def divisor(self) -> sympy.Basic:
return self.args[1]
def _sympystr(self, printer: sympy.printing.StrPrinter) -> str:
base = printer.parenthesize(self.base, PRECEDENCE["Atom"] - 0.5)
divisor = printer.parenthesize(self.divisor, PRECEDENCE["Atom"] - 0.5)
return f"({base}//{divisor})"
# Automatic evaluation.
# https://docs.sympy.org/latest/guides/custom-functions.html#best-practices-for-eval
@classmethod
def eval(
cls, base: sympy.Integer, divisor: sympy.Integer
) -> Union[sympy.Basic, None]:
# python test/test_dynamic_shapes.py -k TestDimConstraints.test_dim_constraints_solve_full
# Assert triggered by inequality solver
# assert base.is_integer, base
# assert divisor.is_integer, divisor
# We don't provide the same error message as in Python because SymPy
# makes it difficult to check the types.
if divisor.is_zero:
raise ZeroDivisionError("division by zero")
if base in (int_oo, -int_oo, sympy.oo, -sympy.oo) and divisor in (
int_oo,
-int_oo,
sympy.oo,
-sympy.oo,
):
return sympy.nan
if base is sympy.nan or divisor is sympy.nan:
return sympy.nan
if base.is_zero:
return sympy.S.Zero
if base.is_integer and equal_valued(divisor, 1):
return base
if base.is_integer and equal_valued(divisor, -1):
return sympy.Mul(base, -1)
if (
isinstance(base, sympy.Number)
and isinstance(divisor, sympy.Number)
and (
base in (int_oo, -int_oo, sympy.oo, -sympy.oo)
or divisor in (int_oo, -int_oo, sympy.oo, -sympy.oo)
)
):
r = float(base) / float(divisor)
if r == math.inf:
return int_oo
elif r == -math.inf:
return -int_oo
elif math.isnan(r):
return sympy.nan
else:
return sympy.Integer(math.floor(r))
if isinstance(base, sympy.Integer) and isinstance(divisor, sympy.Integer):
return sympy.Integer(int(base) // int(divisor))
if isinstance(base, FloorDiv):
return FloorDiv(base.args[0], base.args[1] * divisor)
# Expands (x + y) // b into x // b + y // b.
# This only works if floor is an identity, i.e. x / b is an integer.
if isinstance(divisor, sympy.Integer):
quotients = 0
terms = []
for term in sympy.Add.make_args(base):
quotient = term / divisor
if quotient.is_integer:
terms.append(term)
quotients += quotient
if len(terms) != 0:
# Passing evaluate = False since expression will be optimized during the subtraction post its construction.
return (
FloorDiv(base - sympy.Add(*terms, evaluate=False), divisor)
+ quotients
)
try:
gcd = simple_floordiv_gcd(base, divisor)
if equal_valued(gcd, 1) and isinstance(divisor, sympy.Add):
gcd = sympy.gcd(base, divisor)
if not equal_valued(gcd, 1):
return FloorDiv(
sympy.simplify(base / gcd), sympy.simplify(divisor / gcd)
)
except sympy.PolynomialError:
pass # https://github.com/pytorch/pytorch/issues/108276
return None
def _ccode(self, printer):
base = printer.parenthesize(self.base, PRECEDENCE["Atom"] - 0.5)
divisor = printer.parenthesize(self.divisor, PRECEDENCE["Atom"] - 0.5)
return f"floor({base}/{divisor})"
class ModularIndexing(sympy.Function):
"""
ModularIndexing(a, b, c) => (a // b) % c where % is the C modulus
"""
nargs: tuple[int, ...] = (3,)
is_integer: bool = True
precedence: int = 35 # lower precedence than add
@classmethod
def eval(
cls, base: sympy.Integer, divisor: sympy.Integer, modulus: sympy.Integer
) -> Optional[sympy.Basic]:
if base == 0 or modulus == 1:
return sympy.S.Zero
if (
isinstance(base, sympy.Integer)
and isinstance(divisor, sympy.Integer)
and isinstance(modulus, sympy.Integer)
):
return (base // divisor) % modulus
try:
if divisor != 1:
gcd = sympy.gcd(base, divisor)
if gcd != 1:
return ModularIndexing(
sympy.simplify(base / gcd),
sympy.simplify(divisor / gcd),
modulus,
)
except sympy.PolynomialError:
pass # https://github.com/pytorch/pytorch/issues/108276
if isinstance(base, sympy.Add):
new_terms: list[sympy.Integer] = []
all_positive: bool = True
for term in base.args:
if sympy.gcd(term, modulus * divisor) != modulus * divisor:
if (isinstance(term, sympy.Integer) and term < 0) or (
isinstance(term, sympy.Mul)
and isinstance(term.args[0], sympy.Integer)
and term.args[0] < 0
):
# workaround for https://github.com/openai/triton/issues/619,
# if there are negative terms, // produces wrong result
# TODO if https://github.com/openai/triton/issues/619 is fixed
# this optimization would become valid
all_positive = False
break
else:
new_terms.append(term)
if len(new_terms) != len(base.args) and all_positive:
return ModularIndexing(sum(new_terms), divisor, modulus)
if isinstance(base, FloorDiv):
return ModularIndexing(base.args[0], base.args[1] * divisor, modulus)
return None
def _eval_is_nonnegative(self) -> Optional[bool]:
p, q = self.args[:2]
return fuzzy_eq(p.is_nonnegative, q.is_nonnegative) # type: ignore[attr-defined]
def _eval_is_positive(self) -> Optional[bool]:
p, q = self.args[:2]
return fuzzy_eq(p.is_positive, q.is_positive) # type: ignore[attr-defined]
class Where(sympy.Function):
"""
Good ol' ternary operator
"""
nargs: tuple[int, ...] = (3,)
precedence: int = 35 # lower precedence than add
def _eval_is_integer(self) -> Optional[bool]:
return True if self.args[1].is_integer and self.args[2].is_integer else None # type: ignore[attr-defined]
def _eval_is_nonnegative(self) -> Optional[bool]:
return (
True
if self.args[1].is_nonnegative and self.args[2].is_nonnegative # type: ignore[attr-defined]
else None
)
def _eval_is_positive(self) -> Optional[bool]:
return True if self.args[1].is_positive and self.args[2].is_positive else None # type: ignore[attr-defined]
@classmethod
def eval(
cls, c: sympy.Basic, p: sympy.Basic, q: sympy.Basic
) -> Optional[sympy.Basic]:
if c == sympy.true:
return p
elif c == sympy.false:
return q
return None
# Python-style modulus: take sign from RHS
class PythonMod(sympy.Function):
nargs: tuple[int, ...] = (2,)
precedence: int = 35 # lower precedence than add
is_integer: bool = True
@classmethod
def eval(cls, p: sympy.Expr, q: sympy.Expr) -> Optional[sympy.Expr]:
# python test/dynamo/test_export.py -k ExportTests.test_trivial_constraint
# Triggered by sympy.solvers.inequalities.reduce_inequalities
# assert p.is_integer, p
# assert q.is_integer, q
if q.is_zero:
raise ZeroDivisionError("Modulo by zero")
# Three cases:
# 1. p == 0
# 2. p is either q or -q
# 3. p is integer and q == 1
if p is S.Zero or p in (q, -q) or q == 1:
return S.Zero
# Evaluate if they are both literals.
if q.is_Number and p.is_Number:
return p % q
# If q == 2, it's a matter of whether p is odd or even.
if q.is_Number and q == 2:
if p.is_even:
return S.Zero
if p.is_odd:
return S.One
# If p is a multiple of q.
r = p / q
if r.is_integer:
return S.Zero
# If p < q and its ratio is positive, then:
# - floor(p / q) = 0
# - p % q = p - floor(p / q) * q = p
less = p < q
if less.is_Boolean and bool(less) and r.is_positive:
return p
if sympy.Mod(p, q) == 0:
return S.Zero
return None
# NB: args[1] for PythonMod
def _eval_is_nonnegative(self) -> Optional[bool]:
return True if self.args[1].is_positive else None # type: ignore[attr-defined]
def _eval_is_nonpositive(self) -> Optional[bool]:
return True if self.args[1].is_negative else None # type: ignore[attr-defined]
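# Illustrative: PythonMod follows Python's sign-of-divisor convention, e.g.
# PythonMod(-5, 3) evaluates to 1 and PythonMod(5, -3) to -1, while Mod below
# assumes (and, for literals, asserts) non-negative operands.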
# Generic modulus: only defined on non-negative arguments
class Mod(sympy.Function):
nargs = (2,)
precedence: int = 35 # lower precedence than add
is_integer = True
is_nonnegative = True
@classmethod
def eval(cls, p, q):
# This was adapted from: sympy/core/mod.py
# Triggered by
# python test/test_dynamic_shapes.py -k TestDimConstraints.test_dim_constraints_solve_full
# assert p.is_integer, p
# assert q.is_integer, q
if q.is_zero:
raise ZeroDivisionError("Modulo by zero")
# Three cases:
# 1. p == 0
# 2. p is either q or -q
# 3. p is integer and q == 1
if p is S.Zero or p in (q, -q) or q == 1:
return S.Zero
# Evaluate if they are both literals.
if q.is_Number and p.is_Number:
assert p >= 0, p
assert q >= 1, q
return p % q
# If q == 2, it's a matter of whether p is odd or even.
if q.is_Number and q == 2:
if p.is_even:
return S.Zero
if p.is_odd:
return S.One
# If p is a multiple of q.
r = p / q
if r.is_integer:
return S.Zero
# If p < q and its ratio is positive, then:
# - floor(p / q) = 0
# - p % q = p - floor(p / q) * q = p
less = p < q
if less.is_Boolean and bool(less) and r.is_positive:
return p
class CleanDiv(FloorDiv):
"""
Div where we can assume no rounding.
This is to enable future optimizations.
"""
# Don't use sympy ceiling/floor as they will attempt simplifications involving
# frac
class CeilToInt(sympy.Function):
is_integer = True
@classmethod
def eval(cls, number):
# assert number.is_integer is not True, number
if number in (sympy.oo, int_oo):
return int_oo
if number in (-sympy.oo, -int_oo):
return -int_oo
if isinstance(number, sympy.Number):
return sympy.Integer(math.ceil(float(number)))
class FloorToInt(sympy.Function):
is_integer = True
@classmethod
def eval(cls, number):
if number in (sympy.oo, int_oo):
return int_oo
if number in (-sympy.oo, int_oo):
return -int_oo
if isinstance(number, sympy.Integer):
return number
if isinstance(number, sympy.Number):
return sympy.Integer(math.floor(float(number)))
class CeilDiv(sympy.Function):
"""
Div used in indexing that rounds up.
"""
is_integer = True
def __new__(cls, base, divisor):
base = sympy.sympify(base)
divisor = sympy.sympify(divisor)
if sympy.gcd(base, divisor) == divisor:
return CleanDiv(base, divisor)
else:
return FloorDiv(base + (divisor - 1), divisor)
class LShift(sympy.Function):
is_integer = True
@classmethod
def eval(cls, base, shift):
if shift < 0:
raise ValueError("negative shift count")
return base * 2**shift
class RShift(sympy.Function):
is_integer = True
@classmethod
def eval(cls, base, shift):
if shift < 0:
raise ValueError("negative shift count")
return FloorDiv(base, 2**shift)
class MinMaxBase(Expr, LatticeOp): # type: ignore[misc]
def __new__(cls, *original_args, **assumptions):
from sympy.core.parameters import global_parameters
evaluate = assumptions.pop("evaluate", global_parameters.evaluate)
args = (sympify(arg) for arg in original_args)
# See the comment in _satisfy_unique_summations_symbols.
unique_summations_symbols = (
None
if not evaluate
else cls._satisfy_unique_summations_symbols(original_args)
)
if evaluate:
try:
# first standard filter, for cls.zero and cls.identity
# also reshape Max(a, Max(b, c)) to Max(a, b, c)
args = frozenset(cls._new_args_filter(args)) # type: ignore[assignment]
except ShortCircuit:
return cls.zero # type: ignore[attr-defined]
# No need to run _collapse_arguments and _find_localzeros, see the comment
# in _satisfy_unique_summations_symbols.
if unique_summations_symbols is None:
# remove redundant args that are easily identified
args = cls._collapse_arguments(args, **assumptions)
# find local zeros
args = cls._find_localzeros(args, **assumptions)
args = frozenset(args)
if not args:
return cls.identity # type: ignore[attr-defined]
if len(args) == 1:
return list(args).pop()
# base creation
obj = Expr.__new__(cls, *ordered(args), **assumptions)
obj._argset = args
obj.unique_summations_symbols = unique_summations_symbols
return obj
@classmethod
def _satisfy_unique_summations_symbols(
cls, args
) -> Optional[set[sympy.core.symbol.Symbol]]:
"""
One common case in some models is building expressions of the form
max(max(max(a+b...), c+d), e+f) which is simplified to max(a+b, c+d, e+f, ...).
For such expressions, we call the Max constructor X times (once for each nested
max) and the expression gets flattened.
        A significant cost in constructing those expressions is running _collapse_arguments
        and _find_localzeros. However, those two optimizations are unnecessary when the args
        to max are all of the form a+b, c+d, etc., where each term uses a unique set of symbols.
        This function detects that property for the expressions we are building and, if it
        holds, tells us that we do not need to run those optimizations. To detect it, we store
        an attribute on the expression, "unique_summations_symbols", which records that the
        expression is a min/max operation over terms that use unique symbols. This attribute
        also memoizes the set of symbols used in all the terms, which makes it faster to detect
        the property inductively.
        When we apply max to add a new term, all we need to do is check whether the new term
        uses unique symbols (with respect to the existing terms and itself).
        Example:
        t = Max(a+b, c+d) ==> satisfies the property
        Max(t, h+j) ==> h, j not in [a, b, c, d] ==> satisfies the property.
The function returns None if the new expression does not satisfy the unique_summations_symbols
property. Otherwise, it returns a new set of unique symbols.
"""
if len(args) != 2:
return None
(lhs, rhs) = (
(args[1], args[0])
if isinstance(args[1], MinMaxBase)
else (args[0], args[1])
)
if not _is_symbols_binary_summation(rhs):
return None
# base case max(a+b, c+d) ==> satisfies the property if a+b and c+d use unique symbols.
if _is_symbols_binary_summation(lhs):
return cls._unique_symbols(args)
# inductive case max(t, h+j) ==> satisfies the property if h, j not in t.unique_summations_symbols
if isinstance(lhs, MinMaxBase):
lhs_unique_summations_symbols = getattr(
lhs, "unique_summations_symbols", None
)
if lhs_unique_summations_symbols is not None:
return cls._unique_symbols([rhs], lhs_unique_summations_symbols)
return None
@classmethod
def _unique_symbols(
cls, args, initial_set: Optional[set[sympy.core.symbol.Symbol]] = None
) -> Optional[set[sympy.core.symbol.Symbol]]:
"""
        Return seen_symbols if all atoms in all args are unique symbols,
        else return None. initial_set can be used to provide an initial value for seen_symbols.
"""
seen_symbols = set() if initial_set is None else initial_set
for arg in args:
for element in arg.atoms():
if not isinstance(element, sympy.core.symbol.Symbol):
return None
elif element in seen_symbols:
return None
else:
seen_symbols.add(element)
return seen_symbols
@classmethod
def _collapse_arguments(cls, args, **assumptions):
"""Remove redundant args.
Examples
========
>>> from sympy import Min, Max
>>> from sympy.abc import a, b, c, d, e
Any arg in parent that appears in any
parent-like function in any of the flat args
of parent can be removed from that sub-arg:
>>> Min(a, Max(b, Min(a, c, d)))
Min(a, Max(b, Min(c, d)))
If the arg of parent appears in an opposite-than parent
function in any of the flat args of parent that function
can be replaced with the arg:
>>> Min(a, Max(b, Min(c, d, Max(a, e))))
Min(a, Max(b, Min(a, c, d)))
"""
if not args:
return args
args = list(ordered(args))
if cls is Min:
other = Max
else:
other = Min # type: ignore[assignment]
# find global comparable max of Max and min of Min if a new
# value is being introduced in these args at position 0 of
# the ordered args
if args[0].is_number:
sifted = mins, maxs = [], [] # type: ignore[var-annotated]
for i in args:
for v in walk(i, Min, Max):
if v.args[0].is_comparable:
sifted[isinstance(v, Max)].append(v)
small = Min.identity
for i in mins:
v = i.args[0]
if v.is_number and (v < small) == True: # noqa: E712
small = v
big = Max.identity
for i in maxs:
v = i.args[0]
if v.is_number and (v > big) == True: # noqa: E712
big = v
# at the point when this function is called from __new__,
# there may be more than one numeric arg present since
# local zeros have not been handled yet, so look through
# more than the first arg
if cls is Min:
for arg in args:
if not arg.is_number:
break
if (arg < small) == True: # noqa: E712
small = arg
elif cls == Max:
for arg in args:
if not arg.is_number:
break
if (arg > big) == True: # noqa: E712
big = arg
T = None
if cls is Min:
if small != Min.identity:
other = Max
T = small
elif big != Max.identity:
other = Min # type: ignore[assignment]
T = big
if T is not None:
# remove numerical redundancy
for i in range(len(args)):
a = args[i]
if isinstance(a, other):
a0 = a.args[0]
if ( # noqa: E712
(a0 > T) if other == Max else (a0 < T) # noqa: E712
) == True: # noqa: E712
args[i] = cls.identity # type: ignore[attr-defined]
# remove redundant symbolic args
def do(ai, a):
if not isinstance(ai, (Min, Max)):
return ai
cond = a in ai.args
if not cond:
return ai.func(*[do(i, a) for i in ai.args], evaluate=False)
if isinstance(ai, cls):
return ai.func(*[do(i, a) for i in ai.args if i != a], evaluate=False)
return a
for i, a in enumerate(args):
args[i + 1 :] = [do(ai, a) for ai in args[i + 1 :]]
# factor out common elements as for
# Min(Max(x, y), Max(x, z)) -> Max(x, Min(y, z))
# and vice versa when swapping Min/Max -- do this only for the
# easy case where all functions contain something in common;
# trying to find some optimal subset of args to modify takes
# too long
def factor_minmax(args):
is_other = lambda arg: isinstance(arg, other) # noqa: E731
other_args, remaining_args = sift(args, is_other, binary=True)
if not other_args:
return args
# Min(Max(x, y, z), Max(x, y, u, v)) -> {x,y}, ({z}, {u,v})
arg_sets = [set(arg.args) for arg in other_args]
common = set.intersection(*arg_sets)
if not common:
return args
new_other_args = list(common)
arg_sets_diff = [arg_set - common for arg_set in arg_sets]
# If any set is empty after removing common then all can be
# discarded e.g. Min(Max(a, b, c), Max(a, b)) -> Max(a, b)
if all(arg_sets_diff):
other_args_diff = [other(*s, evaluate=False) for s in arg_sets_diff]
new_other_args.append(cls(*other_args_diff, evaluate=False))
other_args_factored = other(*new_other_args, evaluate=False)
return remaining_args + [other_args_factored]
if len(args) > 1:
args = factor_minmax(args)
return args
@classmethod
def _new_args_filter(cls, arg_sequence):
"""
Generator filtering args.
first standard filter, for cls.zero and cls.identity.
Also reshape ``Max(a, Max(b, c))`` to ``Max(a, b, c)``,
and check arguments for comparability
"""
for arg in arg_sequence:
# pre-filter, checking comparability of arguments
if (
not isinstance(arg, Expr)
or arg.is_extended_real is False
or (arg.is_number and not arg.is_comparable)
):
raise ValueError(f"The argument '{arg}' is not comparable.")
if arg == cls.zero: # type: ignore[attr-defined]
raise ShortCircuit(arg)
elif arg == cls.identity: # type: ignore[attr-defined]
continue
elif arg.func == cls:
yield from arg.args
else:
yield arg
@classmethod
def _find_localzeros(cls, values, **options):
"""
Sequentially allocate values to localzeros.
When a value is identified as being more extreme than another member it
replaces that member; if this is never true, then the value is simply
appended to the localzeros.
        Unlike the sympy implementation, we only look for zero and one; we don't
        do the generic pairwise is-connected test, which is slow.
"""
# First, collapse all numeric arguments
other_values = set()
num_value = None
for arg in values:
if arg.is_Number:
if num_value is None:
num_value = arg
else:
if cls is Max:
num_value = max(num_value, arg)
elif cls is Min:
num_value = min(num_value, arg)
else:
raise AssertionError(f"impossible {cls}")
else:
other_values.add(arg)
# Special cases when there is only one symbolic value
if num_value is None:
return other_values
if len(other_values) == 0:
return {num_value}
if len(other_values) == 1:
other_value = next(iter(other_values))
if num_value in (0.0, 0) and other_value.is_nonnegative:
return other_values if cls is Max else {num_value}
if num_value == 1 and other_value.is_positive:
return other_values if cls is Max else {num_value}
other_values.add(num_value)
return other_values
_eval_is_algebraic = lambda s: _torf(i.is_algebraic for i in s.args) # noqa: E731
_eval_is_antihermitian = lambda s: _torf( # noqa: E731
i.is_antihermitian for i in s.args # noqa: E731
) # noqa: E731
_eval_is_commutative = lambda s: _torf( # noqa: E731
i.is_commutative for i in s.args # noqa: E731
) # noqa: E731
_eval_is_complex = lambda s: _torf(i.is_complex for i in s.args) # noqa: E731
_eval_is_composite = lambda s: _torf(i.is_composite for i in s.args) # noqa: E731
_eval_is_even = lambda s: _torf(i.is_even for i in s.args) # noqa: E731
_eval_is_finite = lambda s: _torf(i.is_finite for i in s.args) # noqa: E731
_eval_is_hermitian = lambda s: _torf(i.is_hermitian for i in s.args) # noqa: E731
_eval_is_imaginary = lambda s: _torf(i.is_imaginary for i in s.args) # noqa: E731
_eval_is_infinite = lambda s: _torf(i.is_infinite for i in s.args) # noqa: E731
_eval_is_integer = lambda s: _torf(i.is_integer for i in s.args) # noqa: E731
_eval_is_irrational = lambda s: _torf(i.is_irrational for i in s.args) # noqa: E731
_eval_is_negative = lambda s: _torf(i.is_negative for i in s.args) # noqa: E731
_eval_is_noninteger = lambda s: _torf(i.is_noninteger for i in s.args) # noqa: E731
_eval_is_nonnegative = lambda s: _torf( # noqa: E731
i.is_nonnegative for i in s.args # noqa: E731
) # noqa: E731
_eval_is_nonpositive = lambda s: _torf( # noqa: E731
i.is_nonpositive for i in s.args # noqa: E731
) # noqa: E731
_eval_is_nonzero = lambda s: _torf(i.is_nonzero for i in s.args) # noqa: E731
_eval_is_odd = lambda s: _torf(i.is_odd for i in s.args) # noqa: E731
_eval_is_polar = lambda s: _torf(i.is_polar for i in s.args) # noqa: E731
_eval_is_positive = lambda s: _torf(i.is_positive for i in s.args) # noqa: E731
_eval_is_prime = lambda s: _torf(i.is_prime for i in s.args) # noqa: E731
_eval_is_rational = lambda s: _torf(i.is_rational for i in s.args) # noqa: E731
_eval_is_real = lambda s: _torf(i.is_real for i in s.args) # noqa: E731
_eval_is_extended_real = lambda s: _torf( # noqa: E731
i.is_extended_real for i in s.args # noqa: E731
) # noqa: E731
_eval_is_transcendental = lambda s: _torf( # noqa: E731
i.is_transcendental for i in s.args # noqa: E731
) # noqa: E731
_eval_is_zero = lambda s: _torf(i.is_zero for i in s.args) # noqa: E731
class Max(MinMaxBase, Application): # type: ignore[misc]
r"""
Return, if possible, the maximum value of the list.
"""
zero = S.Infinity
identity = S.NegativeInfinity
def _eval_is_positive(self): # type:ignore[override]
return fuzzy_or(a.is_positive for a in self.args) # type: ignore[attr-defined]
def _eval_is_nonnegative(self): # type:ignore[override]
return fuzzy_or(a.is_nonnegative for a in self.args) # type: ignore[attr-defined]
def _eval_is_negative(self): # type:ignore[override]
return fuzzy_and(a.is_negative for a in self.args)
class Min(MinMaxBase, Application): # type: ignore[misc]
"""
Return, if possible, the minimum value of the list.
"""
zero = S.NegativeInfinity
identity = S.Infinity
def _eval_is_positive(self): # type:ignore[override]
return fuzzy_and(a.is_positive for a in self.args) # type: ignore[attr-defined]
def _eval_is_nonnegative(self): # type:ignore[override]
return fuzzy_and(a.is_nonnegative for a in self.args) # type: ignore[attr-defined]
def _eval_is_negative(self): # type:ignore[override]
return fuzzy_or(a.is_negative for a in self.args)
def safe_pow(base, exp):
sign = 1
if base < 0:
base = -base
sign = 1 if exp % 2 == 0 else -1
return sign * _safe_pow(base, exp)
# Prevent people from overflowing pow
def _safe_pow(base, exponent):
if exponent < 0:
raise ValueError("Exponent must be non-negative.")
if exponent == 0:
return 1
half_exp = safe_pow(base, exponent // 2)
if half_exp is int_oo:
return int_oo
# TODO: microoptimization is to avoid overflowing into arbitrary precision
# and detect overflow prior to doing operations
result = half_exp * half_exp
if result > sys.maxsize:
return int_oo
if exponent % 2 == 1:
result *= base
if result > sys.maxsize:
return int_oo
return result
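# Illustrative: safe_pow computes by repeated squaring and clamps to int_oo on
# overflow, e.g. safe_pow(-2, 3) == -8, while safe_pow(2, 63) exceeds
# sys.maxsize and returns int_oo.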
class PowByNatural(sympy.Function):
is_integer = True
precedence: int = 50 # precedence of mul
@classmethod
def eval(cls, base, exp):
if isinstance(base, sympy.Integer) and isinstance(exp, sympy.Integer):
r = safe_pow(base, exp)
if r in (-int_oo, int_oo):
return r
return sympy.Integer(r)
if isinstance(exp, sympy.Integer):
# Rely on regular sympy Pow for this (note that iterated
# multiplication turns into a Pow anyway, you can't escape!!)
return sympy.Pow(base, exp)
if exp in (int_oo, sympy.oo):
if base.is_nonnegative:
return int_oo
elif base.is_negative:
return sympy.zoo # this is apparently what (-2)**sympy.oo does
# NB: do NOT translate into sympy.Pow, we will lose knowledge that exp
# is a natural number if we do
# base is assumed to be nonnegative, thereby preventing complex numbers from
# occurring
class FloatPow(sympy.Function):
is_real = True
precedence: int = 60 # precedence of pow
@classmethod
def eval(cls, base, exp):
# NB: These test sympy.Number, not sympy.Float, because:
# - Sometimes we may have sympy.oo or int_oo, and that's not a Float
# (but coerces to math.Inf)
# - Sometimes Float(0.0) will unpredictably decay to Integer(0),
# but we should still accept it in floatey contexts
if isinstance(base, sympy.Number) and isinstance(exp, sympy.Number):
return sympy.Float(float(base) ** float(exp))
# NB: do not do any nontrivial reasoning
# Overloaded to be compatible with regular Python.
# https://github.com/pytorch/pytorch/issues/90900
#
# In particular, sympy division is willing to simplify x/x == 1
# where 1 is an integer, but this must be a float if x was float.
class FloatTrueDiv(sympy.Function):
is_real = True
precedence: int = 35 # lower precedence than add
@classmethod
def eval(cls, base, divisor):
# assert base.is_integer is not True, base
# assert divisor.is_integer is not True, divisor
if divisor.is_zero:
raise ZeroDivisionError("division by zero")
if isinstance(base, sympy.Number) and isinstance(divisor, sympy.Number):
return sympy.Float(float(base) / float(divisor))
# Overloaded to be compatible with regular Python. We distinguish this from
# FloatTrueDiv, because the code generation has to be different for this case:
# Python has a fancy algorithm for integer true division that isn't just
# "promote both arguments to float and use float division", so you need to
# codegen it differently. While technically you can work it out from the
# types of the input, this is often inconvenient to do in Inductor codegen,
# so just have a different operator
# NB: Right now, Inductor codegen doesn't implement this correctly lol
class IntTrueDiv(sympy.Function):
is_real = True
precedence: int = 35 # lower precedence than add
@classmethod
def eval(cls, base, divisor):
if divisor.is_zero:
raise ZeroDivisionError("division by zero")
if (
isinstance(base, sympy.Number)
and isinstance(divisor, sympy.Number)
and (
base in (int_oo, -int_oo, sympy.oo, -sympy.oo)
or divisor in (int_oo, -int_oo, sympy.oo, -sympy.oo)
)
):
# Don't have to worry about precision here, you're getting zero or
# inf from the division
return sympy.Float(float(base) / float(divisor))
if isinstance(base, sympy.Integer) and isinstance(divisor, sympy.Integer):
return sympy.Float(int(base) / int(divisor))
# TODO: As an indicator, this != 0 implies == 1 (and vice versa).
# Because we do not have the ability to guard on the stride permutation
# at the moment, it is hard to make further inferences when this is true,
# as although we know the tensor is contiguous in *some* layout, we don't
# know which one (however, you could, for example, make the inference that
# reshaping this to a 1D tensor can be guard-free.)
class IsNonOverlappingAndDenseIndicator(sympy.Function):
is_integer = True
@classmethod
def eval(cls, *args):
assert len(args) % 2 == 0
dim = len(args) // 2
sizes = args[0:dim]
strides = args[dim:]
# sym_node imported in torch.__init__. Local import to avoid an import cycle
from torch.fx.experimental.symbolic_shapes import (
eval_is_non_overlapping_and_dense,
)
if all(isinstance(a, sympy.Integer) for a in args):
return eval_is_non_overlapping_and_dense(
[int(a) for a in sizes], [int(a) for a in strides]
)
if dim == 1:
# Manually implement the rank one short circuit
if strides[0].is_Number and strides[0] == 1:
return 1
if sizes[0].is_Number and sizes[0] < 2:
return 1
# return 0 case covered by case above
# TODO: Inability to access size-obliviousness sucks: if we have a
# size oblivious test on a size-like unbacked SymInt, we could
# confidently return zero when we have a size-like u0 stride
# and a size-like u1 size. Maybe a fancy ValueRanges analysis for
# this function could help figure this out.
if all(isinstance(a, sympy.Integer) for a in strides):
assert dim != 0
# When all strides are integral, we can sort, and the size for the
# largest stride doesn't matter and can be arbitrarily symbolic
s_sizes, s_strides = zip(
*sorted(zip(sizes, strides), key=operator.itemgetter(1))
)
# Put something arbitrary in the max size spot, it'll be ignored
if all(isinstance(a, sympy.Integer) for a in s_sizes[:-1]):
s_sizes = s_sizes[:-1] + (42,)
# We can reuse the regular eval, because it is invariant to
# permutation of dimensions
return eval_is_non_overlapping_and_dense(
[int(a) for a in s_sizes], [int(a) for a in s_strides]
)
return None
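# Illustrative: with all-integer arguments the indicator fully evaluates, e.g.
# IsNonOverlappingAndDenseIndicator(3, 4, 4, 1) (sizes 3, 4 with contiguous
# strides 4, 1) evaluates to 1; symbolic inputs generally stay unevaluated.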
# NB: this is inconsistent with math.trunc in Python
class TruncToFloat(sympy.Function):
is_real = True
@classmethod
def eval(cls, number):
# assert number.is_integer is not True, number
if isinstance(number, sympy.Number):
# NB: It is safe to use truncation to integer, which is what
# math.trunc does, as Python integers are arbitrary precision and
# so we are guaranteed not to lose precision when we do this
return sympy.Float(math.trunc(float(number)))
class TruncToInt(sympy.Function):
is_integer = True
@classmethod
def eval(cls, number):
# assert number.is_integer is not True, number
if number in (sympy.oo, int_oo):
return int_oo
if number in (-sympy.oo, -int_oo):
return -int_oo
if isinstance(number, sympy.Number):
return sympy.Integer(math.trunc(float(number)))
# This is float -> int
class RoundToInt(sympy.Function):
is_integer = True
@classmethod
def eval(cls, number):
# assert number.is_integer is not True, number
if number is sympy.oo:
return int_oo
if number is -sympy.oo:
return -int_oo
if isinstance(number, sympy.Number):
return sympy.Integer(round(float(number), 0))
# To get float -> int, Python style round semantics.
#
# x = PyFloat_AsDouble(self);
# if (o_ndigits == Py_None) {
# /* single-argument round or with None ndigits:
# * round to nearest integer */
# rounded = round(x);
# if (fabs(x-rounded) == 0.5)
# /* halfway case: round to even */
# rounded = 2.0*round(x/2.0);
# return PyLong_FromDouble(rounded);
# }
# NB: Like Round, this only ever returns floats. ndigits cannot be None
class RoundDecimal(sympy.Function):
is_real = True
@classmethod
def eval(cls, number, ndigits):
# assert number.is_integer is not True, number
if isinstance(number, sympy.Number) and isinstance(ndigits, sympy.Integer):
return sympy.Float(round(float(number), int(ndigits)))
class ToFloat(sympy.Function):
is_real = True
@classmethod
def eval(cls, number):
if number in [sympy.oo, -sympy.oo]:
return number
if isinstance(number, sympy.Integer):
return sympy.Float(int(number))
if number is int_oo:
return sympy.oo
if number is -int_oo:
return -sympy.oo
class Identity(sympy.Function):
"""
Prevents expansion and other optimizations
"""
precedence = 10
def __repr__(self): # type: ignore[override]
return f"Identity({self.args[0]})"
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_integer(self):
return self.args[0].is_integer # type: ignore[attr-defined]
def _eval_expand_identity(self, **hints):
# Removes the identity op.
return self.args[0]
def make_opaque_unary_fn(name):
class OpaqueUnaryFn(sympy.Function):
"""
Unlike the builtin sympy functions on real numbers like sympy.sqrt,
these equivalents do not do any nontrivial reasoning besides
constant propagation. This helps avoid performing transformations
that are valid for real numbers but are invalid for floating point;
in particular, while we are willing to make optimizations that change
        numerics for Tensor compute, we are NOT willing to make optimizations
that change numerics for size compute.
"""
_torch_handler_name = name
@classmethod
def eval(cls, a):
if isinstance(a, (sympy.Integer, sympy.Float)):
# Python converts to float64 before computing, c.f.
# >>> math.sin(2**53+1)
# -0.848925964814655
# >>> math.sin(float(2**53+1))
# -0.848925964814655
try:
return sympy.Float(getattr(math, name)(float(a)))
# Just use sympy semantics for infinity/overflow, you might get some
# weird objects but ask silly questions, get silly answers
except OverflowError:
return getattr(sympy, name)(a)
elif a in [sympy.oo, -sympy.oo, sympy.zoo, -sympy.zoo, int_oo, -int_oo]:
if a is int_oo:
a = sympy.oo
if a is -int_oo:
a = -sympy.oo
if name == "log2":
return sympy.log(a, 2)
return getattr(sympy, name)(a)
return None
nm = "OpaqueUnaryFn_" + name
OpaqueUnaryFn.__name__ = nm
OpaqueUnaryFn.__qualname__ = nm
return OpaqueUnaryFn
# Keep in sync with math_op_names in torch/fx/experimental/sym_node.py
OpaqueUnaryFn_sqrt = make_opaque_unary_fn("sqrt")
OpaqueUnaryFn_cos = make_opaque_unary_fn("cos")
OpaqueUnaryFn_cosh = make_opaque_unary_fn("cosh")
OpaqueUnaryFn_sin = make_opaque_unary_fn("sin")
OpaqueUnaryFn_sinh = make_opaque_unary_fn("sinh")
OpaqueUnaryFn_tan = make_opaque_unary_fn("tan")
OpaqueUnaryFn_tanh = make_opaque_unary_fn("tanh")
OpaqueUnaryFn_asin = make_opaque_unary_fn("asin")
OpaqueUnaryFn_acos = make_opaque_unary_fn("acos")
OpaqueUnaryFn_atan = make_opaque_unary_fn("atan")
OpaqueUnaryFn_exp = make_opaque_unary_fn("exp")
OpaqueUnaryFn_log = make_opaque_unary_fn("log")
OpaqueUnaryFn_asinh = make_opaque_unary_fn("asinh")
OpaqueUnaryFn_log2 = make_opaque_unary_fn("log2")
def make_opaque_bitwise_fn(name, real_op_name):
if name == "bitwise_and":
prec = PRECEDENCE["BitwiseAnd"]
elif name == "bitwise_or":
prec = PRECEDENCE["BitwiseOr"]
else:
raise AssertionError(f"unrecognized {name}")
class BitwiseFn(sympy.Function):
_torch_handler_name = name
precedence: int = prec
@classmethod
def eval(cls, a, b):
if a.is_Boolean and b.is_Boolean:
return getattr(operator, real_op_name)(a, b)
if a.is_Boolean:
a = sympy.Integer(1 if a else 0)
if b.is_Boolean:
b = sympy.Integer(1 if b else 0)
if isinstance(a, (sympy.Integer, int)) and isinstance(
b, (sympy.Integer, int)
):
return sympy.Integer(getattr(operator, real_op_name)(int(a), int(b)))
return None
BitwiseFn.__name__ = "BitwiseFn_" + name
return BitwiseFn
BitwiseFn_bitwise_and = make_opaque_bitwise_fn("bitwise_and", "and_")
BitwiseFn_bitwise_or = make_opaque_bitwise_fn("bitwise_or", "or_")
```
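A brief usage sketch of a few of the helpers defined above, assuming sympy and torch are installed and the module is importable as torch.utils._sympy.functions:
```py
import sympy

from torch.utils._sympy.functions import CeilDiv, FloorDiv, Mod, PythonMod

x = sympy.Symbol("x", integer=True, positive=True)

# FloorDiv is Python-style floor division (rounds toward -Inf).
assert FloorDiv(-7, 2) == -4
# Exactly divisible terms are split out of the numerator.
assert FloorDiv(x + 3, 3) == FloorDiv(x, 3) + 1

# CeilDiv becomes CleanDiv when the divisor divides the base exactly,
# otherwise FloorDiv(base + divisor - 1, divisor).
assert CeilDiv(4 * x, 2) == 2 * x
assert CeilDiv(x + 1, 2) == FloorDiv(x, 2) + 1

# PythonMod follows Python semantics; Mod assumes non-negative operands.
assert PythonMod(-5, 3) == 1
assert Mod(7, 3) == 1
```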
|
====================================================================================================================
SOURCE CODE FILE: interp.py
LINES: 1
SIZE: 7.20 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_sympy\interp.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""
This is a simple interpreter for Sympy expressions that dispatches to
classes following the torch._inductor.virtualized calling convention.
For directness, the interpreter takes the handler directly rather than
consulting the TLS. It does not use most of the methods on the full
handler; only those with corresponding Sympy expressions. To see an example
of a full handler, see torch.utils._sympy.value_ranges.ValueRangeAnalysis.
"""
import functools
import logging
from typing import Any, Union
import sympy
from sympy.logic.boolalg import Boolean as SympyBoolean, BooleanAtom
import torch
from .functions import (
BitwiseFn_bitwise_and,
BitwiseFn_bitwise_or,
CeilToInt,
CleanDiv,
FloatPow,
FloatTrueDiv,
FloorDiv,
FloorToInt,
Identity,
IntTrueDiv,
IsNonOverlappingAndDenseIndicator,
Max,
Min,
Mod,
ModularIndexing,
OpaqueUnaryFn_log2,
PowByNatural,
PythonMod,
RoundDecimal,
RoundToInt,
ToFloat,
TruncToFloat,
TruncToInt,
Where,
)
log = logging.getLogger(__name__)
# TODO: Dedupe this with SYMPY_INTERP
@functools.lru_cache(None)
def handlers():
# TODO add CeilDiv (it doesn't appear in the index_expr)
# TODO default to some decompositions if the interpreter doesn't have them
# like decomposing ModularIndexing or implementing Le(a,b) as Ge(b, a)
HANDLERS = {
sympy.Or: "or_",
sympy.And: "and_",
sympy.Eq: "eq",
sympy.Ne: "ne",
sympy.Lt: "lt",
sympy.Gt: "gt",
sympy.Le: "le",
sympy.Ge: "ge",
sympy.Not: "not_",
IntTrueDiv: "int_truediv",
FloatTrueDiv: "truediv",
FloorDiv: "floordiv",
CleanDiv: "floordiv", # TODO: hmm?
TruncToFloat: "trunc",
Where: "where",
sympy.Add: "add",
sympy.Mul: "mul",
FloatPow: "pow",
PowByNatural: "pow_by_natural",
# sympy simplifies x * x into Pow(x, 2), so we need to handle this.
# Do NOT use builtin Pow for floats
# TODO: There is a hazard here, if we have float * float it will
# also get turned into Pow(float, 2) but we don't want this because
# pow_by_natural is assumed to only be integers. Probably the fix is
# to add a FloatMul to impede this optimization
sympy.Pow: "pow_by_natural",
Mod: "mod",
PythonMod: "mod", # TODO: this is wrong
# TODO: Inductor can generate these, but it's ill-specified which
# semantics were intended here. Needs to be cleaned up along with
# FloorDiv in a bigger cleanup
sympy.Mod: "mod",
sympy.Abs: "abs",
sympy.log: "log",
sympy.exp: "exp",
sympy.Min: "minimum",
sympy.Max: "maximum",
Min: "minimum",
Max: "maximum",
ModularIndexing: "modular_indexing",
sympy.functions.elementary.piecewise.ExprCondPair: "expr_cond_pair",
sympy.Piecewise: "piecewise",
Identity: "identity",
IsNonOverlappingAndDenseIndicator: "is_non_overlapping_and_dense_indicator",
RoundDecimal: "round_decimal",
# TODO: do the rest of the opaque unary functions...
OpaqueUnaryFn_log2: "log2",
BitwiseFn_bitwise_and: "bitwise_and",
BitwiseFn_bitwise_or: "bitwise_or",
}
# TODO: This is kind of pointless, we shouldn't be generating sympy.sin
# for these functions, they should be Opaque instead
for name in ["cos", "sin", "tan", "sinh", "cosh", "tanh", "asin", "acos", "atan"]:
HANDLERS[getattr(sympy, name)] = name
return HANDLERS
ASSOCIATIVE_OPS = {"minimum", "maximum", "mul", "add", "and_", "or_"}
def _run_sympy_handler(analysis, args, expr, index_dtype=torch.int64):
# Special cases
if isinstance(expr, sympy.Pow) and isinstance(
expr.args[1], sympy.core.numbers.Half
):
return analysis.sqrt(args[0])
if isinstance(expr, ToFloat):
return analysis.to_dtype(args[0], torch.float64)
# These handlers are special because they take an extra dtype argument
# specifying what they should convert to, and we need to appropriately set
# this up when we convert from Sympy. A reasonable default when you
# are translating is to conservatively do int64, and then narrow these
# arguments later when you discover you can narrow the index range. But
# if you already know that 32-bit indexing is OK, you can directly do the
# sympy translation with index_dtype=torch.int32
INDEX_DTYPE_HANDLERS = {
TruncToInt: "trunc_to_int",
sympy.floor: "floor_to_int",
sympy.ceiling: "ceil_to_int",
FloorToInt: "floor_to_int",
CeilToInt: "ceil_to_int",
RoundToInt: "round_to_int",
}
if (handler_name := INDEX_DTYPE_HANDLERS.get(expr.func)) is not None:
return getattr(analysis, handler_name)(*args, index_dtype)
# Fastpath for n-ary integral addition
if expr.func is sympy.Add and expr.is_integer and hasattr(analysis, "sym_sum"):
r = analysis.sym_sum(args)
log.debug("sym_sum(%s) -> %s", args, r)
return r
if hasattr(expr.func, "_torch_handler_name"):
handler_name = expr.func._torch_handler_name
else:
handler_name = handlers()[expr.func]
handler = getattr(analysis, handler_name)
try:
if handler_name in ASSOCIATIVE_OPS:
assert len(args) > 1
acc = handler(args[0], args[1])
for i in range(2, len(args)):
acc = handler(acc, args[i])
log.debug("%s(%s) -> %s", handler_name, args, acc)
return acc
else:
r = handler(*args)
log.debug("%s(%s) -> %s", handler_name, args, r)
return r
except NotImplementedError:
raise
except Exception:
log.warning("failed while executing %s(%s)", handler_name, args)
raise
_nil = object()
def sympy_interp(
analysis,
env: dict[sympy.Symbol, Any],
expr: Union[sympy.Expr, SympyBoolean],
*,
index_dtype=torch.int64,
missing_handler=None,
):
# Handle base cases
dtype = None
if isinstance(expr, BooleanAtom):
dtype = torch.bool
elif isinstance(expr, sympy.Integer):
dtype = torch.int64
elif isinstance(expr, sympy.Number):
dtype = torch.double
if dtype is not None:
return analysis.constant(expr, dtype)
elif isinstance(expr, sympy.Symbol):
if (r := env.get(expr, _nil)) is not _nil:
return r
elif missing_handler:
return missing_handler(expr)
else:
raise KeyError(expr)
# Recursive case
return _run_sympy_handler(
analysis,
[
sympy_interp(
analysis,
env,
arg,
index_dtype=index_dtype,
missing_handler=missing_handler,
)
for arg in expr.args
], # type: ignore[arg-type]
expr,
index_dtype=index_dtype,
) # type: ignore[arg-type]
```
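As a quick illustration of the dispatch above (a sketch only, assuming the private `torch.utils._sympy` modules keep their current names): `sympy_interp` recurses over the expression, maps leaves through `analysis.constant` or the `env`, and dispatches each interior node to the handler named in `handlers()` (or via `_torch_handler_name`).
```py
# Sketch only; PythonReferenceAnalysis evaluates with plain Python values.
import sympy

from torch.utils._sympy.interp import sympy_interp
from torch.utils._sympy.reference import PythonReferenceAnalysis

s0 = sympy.Symbol("s0", integer=True)
expr = sympy.Max(s0 + 2, 10)

# env maps free symbols to values; constants go through analysis.constant.
print(sympy_interp(PythonReferenceAnalysis, {s0: 7}, expr))  # 10
```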
|
=====================================================================================================================
SOURCE CODE FILE: numbers.py
LINES: 1
SIZE: 11.52 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_sympy\numbers.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import mpmath.libmp as mlib # type: ignore[import-untyped]
import sympy
from sympy import Expr
from sympy.core.decorators import _sympifyit
from sympy.core.expr import AtomicExpr
from sympy.core.numbers import Number
from sympy.core.parameters import global_parameters
from sympy.core.singleton import S, Singleton
class IntInfinity(Number, metaclass=Singleton):
r"""Positive integer infinite quantity.
    Integer infinity is a value in the extended integers which
is greater than all other integers. We distinguish it from
sympy's existing notion of infinity in that it reports that
it is_integer.
    IntInfinity is a singleton, and can be accessed by ``S.IntInfinity``,
or can be imported as ``int_oo``.
"""
# NB: We can't actually mark this as infinite, as integer and infinite are
# inconsistent assumptions in sympy. We also report that we are complex,
# different from sympy.oo
is_integer = True
is_commutative = True
is_number = True
is_extended_real = True
is_comparable = True
is_extended_positive = True
is_prime = False
# Ensure we get dispatched to before plain numbers
_op_priority = 100.0
__slots__ = ()
def __new__(cls):
return AtomicExpr.__new__(cls)
def _sympystr(self, printer):
return "int_oo"
def _eval_subs(self, old, new):
if self == old:
return new
# We could do these, not sure about it
"""
def _eval_evalf(self, prec=None):
return Float('inf')
def evalf(self, prec=None, **options):
return self._eval_evalf(prec)
"""
@_sympifyit("other", NotImplemented)
def __add__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other in (S.Infinity, S.NegativeInfinity):
return other
if other in (S.NegativeIntInfinity, S.NaN):
return S.NaN
return self
return Number.__add__(self, other)
__radd__ = __add__
@_sympifyit("other", NotImplemented)
def __sub__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other is S.Infinity:
return S.NegativeInfinity
if other is S.NegativeInfinity:
return S.Infinity
if other in (S.IntInfinity, S.NaN):
return S.NaN
return self
return Number.__sub__(self, other)
@_sympifyit("other", NotImplemented)
def __rsub__(self, other):
return (-self).__add__(other)
@_sympifyit("other", NotImplemented)
def __mul__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other.is_zero or other is S.NaN:
return S.NaN
if other.is_extended_positive:
return self
return S.NegativeIntInfinity
return Number.__mul__(self, other)
__rmul__ = __mul__
@_sympifyit("other", NotImplemented)
def __truediv__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other in (
S.Infinity,
S.IntInfinity,
S.NegativeInfinity,
S.NegativeIntInfinity,
S.NaN,
):
return S.NaN
if other.is_extended_nonnegative:
return S.Infinity # truediv produces float
return S.NegativeInfinity # truediv produces float
return Number.__truediv__(self, other)
def __abs__(self):
return S.IntInfinity
def __neg__(self):
return S.NegativeIntInfinity
def _eval_power(self, expt):
if expt.is_extended_positive:
return S.IntInfinity
if expt.is_extended_negative:
return S.Zero
if expt is S.NaN:
return S.NaN
if expt is S.ComplexInfinity:
return S.NaN
if expt.is_extended_real is False and expt.is_number:
from sympy.functions.elementary.complexes import re
expt_real = re(expt)
if expt_real.is_positive:
return S.ComplexInfinity
if expt_real.is_negative:
return S.Zero
if expt_real.is_zero:
return S.NaN
return self ** expt.evalf()
def _as_mpf_val(self, prec):
return mlib.finf
def __hash__(self):
return super().__hash__()
def __eq__(self, other):
return other is S.IntInfinity
def __ne__(self, other):
return other is not S.IntInfinity
def __gt__(self, other):
if other is S.Infinity:
return sympy.false # sympy.oo > int_oo
elif other is S.IntInfinity:
return sympy.false # consistency with sympy.oo
else:
return sympy.true
def __ge__(self, other):
if other is S.Infinity:
return sympy.false # sympy.oo > int_oo
elif other is S.IntInfinity:
return sympy.true # consistency with sympy.oo
else:
return sympy.true
def __lt__(self, other):
if other is S.Infinity:
return sympy.true # sympy.oo > int_oo
elif other is S.IntInfinity:
return sympy.false # consistency with sympy.oo
else:
return sympy.false
def __le__(self, other):
if other is S.Infinity:
return sympy.true # sympy.oo > int_oo
elif other is S.IntInfinity:
return sympy.true # consistency with sympy.oo
else:
return sympy.false
@_sympifyit("other", NotImplemented)
def __mod__(self, other):
if not isinstance(other, Expr):
return NotImplemented
return S.NaN
__rmod__ = __mod__
def floor(self):
return self
def ceiling(self):
return self
int_oo = S.IntInfinity
class NegativeIntInfinity(Number, metaclass=Singleton):
"""Negative integer infinite quantity.
NegativeInfinity is a singleton, and can be accessed
by ``S.NegativeInfinity``.
See Also
========
IntInfinity
"""
# Ensure we get dispatched to before plain numbers
_op_priority = 100.0
is_integer = True
is_extended_real = True
is_commutative = True
is_comparable = True
is_extended_negative = True
is_number = True
is_prime = False
__slots__ = ()
def __new__(cls):
return AtomicExpr.__new__(cls)
def _eval_subs(self, old, new):
if self == old:
return new
def _sympystr(self, printer):
return "-int_oo"
"""
def _eval_evalf(self, prec=None):
return Float('-inf')
def evalf(self, prec=None, **options):
return self._eval_evalf(prec)
"""
@_sympifyit("other", NotImplemented)
def __add__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other is S.Infinity:
return S.Infinity
if other in (S.IntInfinity, S.NaN):
return S.NaN
return self
return Number.__add__(self, other)
__radd__ = __add__
@_sympifyit("other", NotImplemented)
def __sub__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other is S.NegativeInfinity:
return S.Infinity
if other in (S.NegativeIntInfinity, S.NaN):
return S.NaN
return self
return Number.__sub__(self, other)
@_sympifyit("other", NotImplemented)
def __rsub__(self, other):
return (-self).__add__(other)
@_sympifyit("other", NotImplemented)
def __mul__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other.is_zero or other is S.NaN:
return S.NaN
if other.is_extended_positive:
return self
return S.IntInfinity
return Number.__mul__(self, other)
__rmul__ = __mul__
@_sympifyit("other", NotImplemented)
def __truediv__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other in (
S.Infinity,
S.IntInfinity,
S.NegativeInfinity,
S.NegativeIntInfinity,
S.NaN,
):
return S.NaN
if other.is_extended_nonnegative:
return self
return S.Infinity # truediv returns float
return Number.__truediv__(self, other)
def __abs__(self):
return S.IntInfinity
def __neg__(self):
return S.IntInfinity
def _eval_power(self, expt):
if expt.is_number:
if expt in (
S.NaN,
S.Infinity,
S.NegativeInfinity,
S.IntInfinity,
S.NegativeIntInfinity,
):
return S.NaN
if isinstance(expt, sympy.Integer) and expt.is_extended_positive:
if expt.is_odd:
return S.NegativeIntInfinity
else:
return S.IntInfinity
inf_part = S.IntInfinity**expt
s_part = S.NegativeOne**expt
if inf_part == 0 and s_part.is_finite:
return inf_part
if (
inf_part is S.ComplexInfinity
and s_part.is_finite
and not s_part.is_zero
):
return S.ComplexInfinity
return s_part * inf_part
def _as_mpf_val(self, prec):
return mlib.fninf
def __hash__(self):
return super().__hash__()
def __eq__(self, other):
return other is S.NegativeIntInfinity
def __ne__(self, other):
return other is not S.NegativeIntInfinity
def __gt__(self, other):
if other is S.NegativeInfinity:
return sympy.true # -sympy.oo < -int_oo
elif other is S.NegativeIntInfinity:
return sympy.false # consistency with sympy.oo
else:
return sympy.false
def __ge__(self, other):
if other is S.NegativeInfinity:
return sympy.true # -sympy.oo < -int_oo
elif other is S.NegativeIntInfinity:
return sympy.true # consistency with sympy.oo
else:
return sympy.false
def __lt__(self, other):
if other is S.NegativeInfinity:
return sympy.false # -sympy.oo < -int_oo
elif other is S.NegativeIntInfinity:
return sympy.false # consistency with sympy.oo
else:
return sympy.true
def __le__(self, other):
if other is S.NegativeInfinity:
return sympy.false # -sympy.oo < -int_oo
elif other is S.NegativeIntInfinity:
return sympy.true # consistency with sympy.oo
else:
return sympy.true
@_sympifyit("other", NotImplemented)
def __mod__(self, other):
if not isinstance(other, Expr):
return NotImplemented
return S.NaN
__rmod__ = __mod__
def floor(self):
return self
def ceiling(self):
return self
def as_powers_dict(self):
return {S.NegativeOne: 1, S.IntInfinity: 1}
```
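A short sketch of how `int_oo` is intended to behave (assuming it stays importable from this module): an integer-valued infinity that absorbs finite arithmetic and is ordered strictly below `sympy.oo`.
```py
# Sketch only.
import sympy

from torch.utils._sympy.numbers import int_oo

assert int_oo.is_integer                         # unlike sympy.oo
assert bool(int_oo > sympy.Integer(10) ** 100)   # larger than any finite Integer
assert (int_oo + 5) is int_oo                    # absorbs finite addition
assert bool(int_oo < sympy.oo)                   # but ordered below sympy.oo
assert (-int_oo) is sympy.S.NegativeIntInfinity  # negation gives -int_oo
```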
|
======================================================================================================================
SOURCE CODE FILE: printers.py
LINES: 1
SIZE: 19.28 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_sympy\printers.py
ENCODING: utf-8
```py
import sys
from typing import Optional
import sympy
from sympy.printing.precedence import PRECEDENCE, precedence
from sympy.printing.str import StrPrinter
INDEX_TYPE = "int64_t"
# This printer contains rules that are supposed to be generic for both C/C++ and
# Python
class ExprPrinter(StrPrinter):
# override this so that _print_FloorDiv is used
printmethod = "_torch_sympystr"
def _print_Mul(self, expr: sympy.Expr) -> str:
return self.stringify(expr.args, "*", precedence(expr))
def _print_Add(self, expr: sympy.Expr, order: Optional[str] = None) -> str:
return self.stringify(expr.args, " + ", precedence(expr))
def _print_Relational(self, expr: sympy.Expr) -> str:
return self.stringify(expr.args, f" {expr.rel_op} ", precedence(expr))
def _print_BitwiseFn_bitwise_and(self, expr: sympy.Expr) -> str:
return self.stringify(expr.args, " & ", PRECEDENCE["BitwiseAnd"])
def _print_BitwiseFn_bitwise_or(self, expr: sympy.Expr) -> str:
return self.stringify(expr.args, " | ", PRECEDENCE["BitwiseOr"])
# NB: this is OK to put here, because Mod is only defined for positive
# numbers, and so across C/Python its behavior is consistent
def _print_Mod(self, expr: sympy.Expr) -> str:
return self.stringify(expr.args, " % ", PRECEDENCE["Atom"] - 0.5)
def _print_FloatTrueDiv(self, expr: sympy.Expr) -> str:
s = self.stringify(expr.args, " / ", PRECEDENCE["Atom"] - 0.5)
return f"({s})"
def _print_CleanDiv(self, expr: sympy.Expr) -> str:
return self._print_FloorDiv(expr)
def _print_Identity(self, expr: sympy.Expr) -> str:
return self._print(expr.args[0])
# This must be implemented because sympy will collect x * x into Pow(x, 2), without
# any explicit intervention. We print it just like x * x, notably, we
# never generate sympy.Pow with floats.
#
    # NB: this is pow by natural; you should never have used builtin sympy.Pow
    # for FloatPow, and a symbolic exponent should be PowByNatural. This
    # means exp is guaranteed to be an integer.
def _print_Pow(self, expr: sympy.Expr) -> str:
base, exp = expr.args
assert exp == int(exp), exp
exp = int(exp)
assert exp >= 0
if exp > 0:
return self.stringify([base] * exp, "*", PRECEDENCE["Mul"])
return "1"
# Explicit NotImplemented functions are to prevent default sympy printing
# behavior, which will just barf out ToFloat(...) to your IR. The error
# message is better here because it tells you which printer class it needs
# to go in.
def _print_ToFloat(self, expr: sympy.Expr) -> str:
raise NotImplementedError(f"_print_ToFloat not implemented for {type(self)}")
def _print_Infinity(self, expr: sympy.Expr) -> str:
raise NotImplementedError(f"_print_Infinity not implemented for {type(self)}")
def _print_NegativeInfinity(self, expr: sympy.Expr) -> str:
raise NotImplementedError(
f"_print_NegativeInfinity not implemented for {type(self)}"
)
def _print_FloorDiv(self, expr: sympy.Expr) -> str:
raise NotImplementedError(f"_print_FloorDiv not implemented for {type(self)}")
def _print_PythonMod(self, expr: sympy.Expr) -> str:
raise NotImplementedError(f"_print_PythonMod not implemented for {type(self)}")
def _print_IntTrueDiv(self, expr: sympy.Expr) -> str:
raise NotImplementedError(f"_print_IntTrueDiv not implemented for {type(self)}")
def _print_PowByNatural(self, expr: sympy.Expr) -> str:
raise NotImplementedError(
f"_print_PowByNatural not implemented for {type(self)}"
)
def _print_FloatPow(self, expr: sympy.Expr) -> str:
raise NotImplementedError(f"_print_FloatPow not implemented for {type(self)}")
def _print_TruncToInt(self, expr: sympy.Expr) -> str:
raise NotImplementedError(f"_print_TruncToInt not implemented for {type(self)}")
def _print_RoundToInt(self, expr: sympy.Expr) -> str:
raise NotImplementedError(f"_print_RoundToInt not implemented for {type(self)}")
def _print_RoundDecimal(self, expr: sympy.Expr) -> str:
raise NotImplementedError(
f"_print_RoundDecimal not implemented for {type(self)}"
)
# NB: Some float operations are INTENTIONALLY not implemented for
# printers. You can implement them as a quick unblock, but it is better
# to ask yourself why we haven't done this computation in the Tensor
# universe instead
def _print_TruncToFloat(self, expr: sympy.Expr) -> str:
raise NotImplementedError(
f"_print_TruncToFloat not implemented for {type(self)}"
)
class PythonPrinter(ExprPrinter):
def _print_ToFloat(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
# NB: We use sym_float here because the printer is used for cache
# serialization, and cache guards get evaluated with SymInt to
# propagate guards to the parent ShapeEnv. However, this comes at a
# runtime cost for guards involving float. If this is unacceptable
# overhead, what you want to do is have two separate printers for
# SymInt, one for when the inputs are guaranteed to be int, and
# another for when they could be SymInt.
#
# NB: sym_min/sym_max also have this problem, but I chose not to fix
# those.
#
# See https://github.com/pytorch/pytorch/issues/142507 for more
# context.
return f"torch.sym_float({self._print(expr.args[0])})"
def _print_And(self, expr: sympy.Expr) -> str:
return self.stringify(expr.args, " and ", precedence(expr))
def _print_Or(self, expr: sympy.Expr) -> str:
return self.stringify(expr.args, " or ", precedence(expr))
def _print_ModularIndexing(self, expr: sympy.Expr) -> str:
x, div, mod = (
self.parenthesize(arg, PRECEDENCE["Atom"] - 0.5) for arg in expr.args
)
if div != "1":
x = f"({x} // {div})"
return f"({x} % {mod})"
def _print_Infinity(self, expr: sympy.Expr) -> str:
return "math.inf"
def _print_NegativeInfinity(self, expr: sympy.Expr) -> str:
return "-math.inf"
# WARNING: this is dangerous for Triton, which has C-style modulus
def _print_PythonMod(self, expr: sympy.Expr) -> str:
return self.stringify(expr.args, " % ", PRECEDENCE["Atom"] - 0.5)
# WARNING: this is dangerous for Triton, which has C-style modulus
def _print_FloorDiv(self, expr: sympy.Expr) -> str:
x, div = (self.parenthesize(arg, PRECEDENCE["Atom"] - 0.5) for arg in expr.args)
return f"{x} // {div}"
# WARNING: this is dangerous for Triton, when lhs, rhs > 2**53, Python
# does a special algorithm
def _print_IntTrueDiv(self, expr: sympy.Expr) -> str:
return self.stringify(expr.args, " / ", PRECEDENCE["Atom"] - 0.5)
def _helper_sqrt(self, expr: sympy.Expr) -> str:
return f"math.sqrt({self._print(expr)})"
def _print_OpaqueUnaryFn_sqrt(self, expr: sympy.Expr) -> str:
return self._helper_sqrt(expr.args[0])
def _print_FloatPow(self, expr: sympy.Expr) -> str:
return self.stringify(expr.args, " ** ", PRECEDENCE["Pow"])
# TODO: Not sure this works with Triton, even when base/exp are integral
def _print_PowByNatural(self, expr: sympy.Expr) -> str:
return self.stringify(expr.args, " ** ", PRECEDENCE["Pow"])
def _print_floor(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"math.floor({self._print(expr.args[0])})"
def _print_FloorToInt(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"math.floor({self._print(expr.args[0])})"
def _print_TruncToInt(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
# This also could have been int(), they'll do the same thing for float
return f"math.trunc({self._print(expr.args[0])})"
def _print_ceiling(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"math.ceil({self._print(expr.args[0])})"
def _print_CeilToInt(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"math.ceil({self._print(expr.args[0])})"
def _print_Abs(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"abs({self._print(expr.args[0])})"
# NB: It's expected that we've made explicit any promotion in the sympy
# expression, so it doesn't matter that Python max/min doesn't perform
# promotion
def _print_Max(self, expr: sympy.Expr) -> str:
assert len(expr.args) >= 2
return f"max({', '.join(map(self._print, expr.args))})"
def _print_Min(self, expr: sympy.Expr) -> str:
assert len(expr.args) >= 2
return f"min({', '.join(map(self._print, expr.args))})"
def _print_OpaqueUnaryFn_cos(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"math.cos({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_cosh(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"math.cosh({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_acos(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"math.acos({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_sin(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"math.sin({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_sinh(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"math.sinh({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_asin(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"math.asin({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_tan(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"math.tan({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_tanh(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"math.tanh({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_atan(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"math.atan({self._print(expr.args[0])})"
def _print_RoundToInt(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"round({self._print(expr.args[0])})"
def _print_RoundDecimal(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 2
number, ndigits = expr.args
assert isinstance(ndigits, sympy.Integer)
return f"round({self._print(number)}, {ndigits})"
class CppPrinter(ExprPrinter):
def _print_Integer(self, expr: sympy.Expr) -> str:
return (
f"{int(expr)}LL" if sys.platform in ["darwin", "win32"] else f"{int(expr)}L"
)
def _print_Where(self, expr: sympy.Expr) -> str:
c, p, q = (
self.parenthesize(arg, PRECEDENCE["Atom"] - 0.5) for arg in expr.args
)
return f"{c} ? {p} : {q}"
def _print_ModularIndexing(self, expr: sympy.Expr) -> str:
x, div, mod = expr.args
x = self.doprint(x)
if div != 1:
div = self.doprint(div)
if expr.is_integer:
x = f"c10::div_floor_integer(static_cast<int64_t>({x}), static_cast<int64_t>({div}))"
else:
x = f"c10::div_floor_floating(static_cast<double>({x}), static_cast<double>({div}))"
mod = self.doprint(mod)
return f"(static_cast<{INDEX_TYPE}>({x}) % static_cast<{INDEX_TYPE}>({mod}))"
def _print_FloorDiv(self, expr: sympy.Expr) -> str:
x, div = expr.args
x = self.doprint(x)
div = self.doprint(div)
if expr.is_integer:
return f"c10::div_floor_integer(static_cast<int64_t>({x}), static_cast<int64_t>({div}))"
return f"c10::div_floor_floating(static_cast<double>({x}), static_cast<double>({div}))"
def _print_floor(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
r = f"std::floor({self._print(expr.args[0])})"
return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r
def _print_FloorToInt(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
r = f"std::floor({self._print(expr.args[0])})"
return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r
def _print_TruncToInt(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
r = f"std::trunc({self._print(expr.args[0])})"
return f"static_cast<{INDEX_TYPE}>({r})"
def _print_TruncToFloat(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"std::trunc({self._print(expr.args[0])})"
def _print_ToFloat(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"static_cast<double>({self._print(expr.args[0])})"
def _print_PythonMod(self, expr: sympy.Expr) -> str:
x, div = expr.args
x = self.doprint(x)
div = self.doprint(div)
return f"c10::div_mod({x}, {div})"
def _print_IntTrueDiv(self, expr: sympy.Expr) -> str:
lhs, rhs = expr.args
# TODO: This is only accurate up to 2**53
return f"static_cast<double>({self._print(lhs)}) / static_cast<double>({self._print(rhs)})"
# TODO: PowByNatural: we need to implement our own int-int pow. Do NOT
# use std::pow, that operates on floats
def _print_PowByNatural(self, expr: sympy.Expr) -> str:
raise NotImplementedError(
f"_print_PowByNatural not implemented for {type(self)}"
)
def _print_FloatPow(self, expr: sympy.Expr) -> str:
base, exp = expr.args
return f"std::pow({self._print(base)}, {self._print(exp)})"
def _print_Pow(self, expr: sympy.Expr) -> str:
# Uses float constants to perform FP div
base, exp = expr.args
if exp == 0.5 or exp == -0.5:
base = self._print(base)
return f"std::sqrt({base})" if exp == 0.5 else f"1.0/std::sqrt({base})"
if exp.is_integer:
exp = int(exp)
if exp > 0:
r = self.stringify([base] * exp, "*", PRECEDENCE["Mul"])
elif exp < -1:
r = (
"1.0/("
+ self.stringify([base] * abs(exp), "*", PRECEDENCE["Mul"])
+ ")"
)
elif exp == -1:
r = "1.0/" + self._print(base)
else: # exp == 0
r = "1.0"
return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r
else:
# TODO: float vs double
            return f"std::pow({self._print(base)}, {float(exp)})"
def _print_Rational(self, expr: sympy.Expr) -> str:
# Uses float constants to perform FP div
if expr.q == 1:
r = f"{expr.p}"
else:
r = f"{expr.p}.0/{expr.q}.0"
return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r
def _print_ceiling(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
r = f"std::ceil({self._print(expr.args[0])})"
return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r
def _print_CeilToInt(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
r = f"std::ceil({self._print(expr.args[0])})"
return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r
def _print_Min(self, expr: sympy.Expr) -> str:
args = [self._print(a) for a in expr.args]
if len(args) == 2:
return f"std::min(static_cast<{INDEX_TYPE}>({args[0]}), static_cast<{INDEX_TYPE}>({args[1]}))"
else:
# Initializer list overload
il = "{" + ", ".join(args) + "}"
return f"std::min({il})"
def _print_Max(self, expr: sympy.Expr) -> str:
args = [self._print(a) for a in expr.args]
if len(args) == 2:
return f"std::max(static_cast<{INDEX_TYPE}>({args[0]}), static_cast<{INDEX_TYPE}>({args[1]}))"
else:
# Initializer list overload
il = "{" + ", ".join(args) + "}"
return f"std::max({il})"
def _print_Abs(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"std::abs({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_cos(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"std::cos({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_cosh(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"std::cosh({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_acos(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"std::acos({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_sin(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"std::sin({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_sinh(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"std::sinh({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_asin(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"std::asin({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_tan(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"std::tan({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_tanh(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"std::tanh({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_atan(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"std::atan({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_sqrt(self, expr: sympy.Expr) -> str:
return f"std::sqrt({self._print(expr.args[0])})"
def _print_RoundToInt(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
# TODO: dispatch to llrint depending on index type
return f"std::lrint({self._print(expr.args[0])})"
def _print_RoundDecimal(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 2
number, ndigits = expr.args
if number.is_integer:
# ndigits < 0 should have been filtered by the sympy function
assert ndigits < 0
raise ValueError(
f"For integer inputs, only non-negative ndigits are currently supported, but got {ndigits}."
)
number_str = self.parenthesize(number, PRECEDENCE["Mul"])
return f"static_cast<double>(std::nearbyint(1e{ndigits} * {number_str}) * 1e{-ndigits})"
def _print_BooleanTrue(self, expr: sympy.Expr) -> str:
return "true"
def _print_BooleanFalse(self, expr: sympy.Expr) -> str:
return "false"
def _print_Infinity(self, expr: sympy.Expr) -> str:
return "std::numeric_limits<double>::infinity()"
def _print_NegativeInfinity(self, expr: sympy.Expr) -> str:
return f"-{self._print_Infinity(expr)}"
```
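A small sketch contrasting the two concrete printers above (assumptions: current private layout; the rendered strings in the comments are indicative, not exact, and may differ between versions).
```py
# Sketch only.
import sympy

from torch.utils._sympy.functions import FloorDiv
from torch.utils._sympy.printers import CppPrinter, PythonPrinter

s0, s1 = sympy.symbols("s0 s1", integer=True, positive=True)
expr = FloorDiv(s0 + 3, s1)

print(PythonPrinter().doprint(expr))  # e.g. "(s0 + 3) // s1"
print(CppPrinter().doprint(expr))     # e.g. "c10::div_floor_integer(...)"
```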
|
=======================================================================================================================
SOURCE CODE FILE: reference.py
LINES: 1
SIZE: 13.85 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_sympy\reference.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import math
import operator
from typing import Union
import sympy
import torch
from torch.utils._sympy.functions import (
_keep_float,
BitwiseFn_bitwise_and,
BitwiseFn_bitwise_or,
FloatPow,
FloatTrueDiv,
FloorDiv,
IntTrueDiv,
Max,
Min,
Mod,
OpaqueUnaryFn_exp,
OpaqueUnaryFn_log,
OpaqueUnaryFn_log2,
OpaqueUnaryFn_sqrt,
PowByNatural,
RoundDecimal,
RoundToInt,
ToFloat,
TruncToInt,
)
# The sympy interpretation of operators. It will also sometimes work with
# plain int/float, but if you do certain operations you will get out a
# sympy.Basic in the end. If you want the Python/FX traceable interpretation,
# check PythonReferenceAnalysis.
# NB: For magic methods this needs to use normal magic methods
# so that test_magic_methods works
class ReferenceAnalysis:
@staticmethod
def constant(c, dtype):
return sympy.sympify(c)
@staticmethod
def or_(a, b):
return a | b
@staticmethod
def and_(a, b):
return a & b
@staticmethod
def eq(a, b):
if isinstance(a, sympy.Expr) or isinstance(b, sympy.Expr):
return sympy.Eq(a, b)
return a == b
@classmethod
def ne(cls, a, b):
return cls.not_(cls.eq(a, b))
@staticmethod
def lt(a, b):
return a < b
@staticmethod
def gt(a, b):
return a > b
@staticmethod
def le(a, b):
return a <= b
@staticmethod
def ge(a, b):
return a >= b
@staticmethod
def not_(a):
assert not isinstance(a, bool)
return ~a
@staticmethod
def reciprocal(x):
return FloatTrueDiv(1.0, x)
@staticmethod
def square(x):
return PowByNatural(x, 2)
@staticmethod
def trunc_to_int(x, dtype):
return TruncToInt(x)
@staticmethod
def ceil_to_int(x, dtype):
return sympy.ceiling(x)
@staticmethod
def floor_to_int(x, dtype):
return sympy.floor(x)
@staticmethod
def floor(x):
return _keep_float(sympy.floor)(x)
@staticmethod
def ceil(x):
return _keep_float(sympy.ceiling)(x)
@staticmethod
def to_dtype(x, dtype):
if dtype == torch.float64:
return ToFloat(x)
raise NotImplementedError(f"to_dtype {dtype} NYI")
@staticmethod
def mod(x, y):
return Mod(x, y)
@staticmethod
def abs(x):
return abs(x)
@staticmethod
def neg(x):
return -x
@staticmethod
def truediv(a, b):
return FloatTrueDiv(a, b)
@staticmethod
def int_truediv(a, b):
return IntTrueDiv(a, b)
@staticmethod
def floordiv(a, b):
return FloorDiv(a, b)
@staticmethod
def truncdiv(a, b):
raise NotImplementedError("TODO: truncdiv")
@staticmethod
def add(a, b):
return _keep_float(operator.add)(a, b)
@classmethod
def sym_sum(cls, args):
return sympy.Add(*args)
@staticmethod
def mul(a, b):
return _keep_float(operator.mul)(a, b)
@staticmethod
def sub(a, b):
return _keep_float(operator.sub)(a, b)
@staticmethod
def exp(x):
return OpaqueUnaryFn_exp(x)
@staticmethod
def log(x):
return OpaqueUnaryFn_log(x)
@staticmethod
def log2(x):
return OpaqueUnaryFn_log2(x)
@staticmethod
def sqrt(x):
return OpaqueUnaryFn_sqrt(x)
@staticmethod
def pow(a, b):
return _keep_float(FloatPow)(a, b)
@staticmethod
def pow_by_natural(a, b):
return PowByNatural(a, b)
@staticmethod
def minimum(a, b):
return Min(a, b)
@staticmethod
def maximum(a, b):
return Max(a, b)
@staticmethod
def round_to_int(a, dtype):
return RoundToInt(a)
@staticmethod
def round_decimal(a, b):
return RoundDecimal(a, b)
@staticmethod
def bitwise_and(a, b):
return BitwiseFn_bitwise_and(a, b)
@staticmethod
def bitwise_or(a, b):
return BitwiseFn_bitwise_or(a, b)
# Unlike ReferenceAnalysis, this does NOT sympify; instead, it works with plain
# Python types and is FX traceable. Inheritance here is purely for code
# sharing (TODO: consider splitting out a BaseReferenceAnalysis).
class PythonReferenceAnalysis(ReferenceAnalysis):
@staticmethod
def constant(c, dtype):
if dtype is torch.int64:
return int(c)
elif dtype is torch.double:
return float(c)
elif dtype is torch.bool:
return bool(c)
else:
raise AssertionError(f"unrecognized dtype {dtype}")
@staticmethod
def not_(a):
return torch.sym_not(a)
@classmethod
def sym_sum(cls, args):
if len(args) == 0:
return 0
if len(args) == 1:
return args[0]
acc = cls.add(args[0], args[1])
for i in range(2, len(args)):
acc = cls.add(acc, args[i])
return acc
@staticmethod
def floordiv(a, b):
return a // b
@staticmethod
def mod(x, y):
return x % y
@staticmethod
def truncdiv(a, b):
return a / b
@staticmethod
def to_dtype(x, dtype):
if dtype == torch.float64:
return torch.sym_float(x)
raise NotImplementedError(f"to_dtype {dtype} NYI")
@staticmethod
def exp(x):
raise AssertionError("exp is not valid shape sympy expr")
@staticmethod
def log(x):
raise AssertionError("log is not valid shape sympy expr")
@staticmethod
def log2(x):
return torch._sym_log2(x) # type: ignore[attr-defined]
@staticmethod
def sqrt(x):
return torch._sym_sqrt(x) # type: ignore[attr-defined]
@staticmethod
def minimum(a, b):
return torch.sym_min(a, b)
@staticmethod
def maximum(a, b):
return torch.sym_max(a, b)
@staticmethod
def floor_to_int(x, dtype):
return math.floor(x)
@staticmethod
def ceil_to_int(x, dtype):
return math.ceil(x)
@staticmethod
def floor(x):
return float(math.floor(x))
@staticmethod
def ceil(x):
return float(math.ceil(x))
@staticmethod
def truediv(a, b):
return a / b
@staticmethod
def pow(a, b):
return a**b
@staticmethod
def pow_by_natural(a, b):
# Pray that safe_pow is not needed here lol. In particular, this
# never participates in VR low/high ranges, so overflow should be
# unlikely
return a**b
@staticmethod
def round_to_int(a, dtype):
return round(a)
@staticmethod
def round_decimal(a, b):
return round(a, ndigits=b)
@staticmethod
def bitwise_and(a, b):
return a & b
@staticmethod
def bitwise_or(a, b):
return a | b
# Like PythonReferenceAnalysis, but some export-unfriendly choices of
# operators to make things faster
class OptimizedPythonReferenceAnalysis(PythonReferenceAnalysis):
@staticmethod
def sym_sum(args):
return torch.sym_sum(args)
def _to_dtype(x: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
return torch.ops.prims.convert_element_type.default(x, dtype)
# Suppose we have some int/float arguments. This diagram commutes:
#
#   int/float  -- PythonReferenceAnalysis.op -->  int/float
#       |                                             |
#       |                                             |
#      torch.tensor(..., dtype=torch.int64/torch.float64)
#       |                                             |
#       V                                             V
#     Tensor   -- TensorReferenceAnalysis.op -->    Tensor
#
# NB: int before and after must be representable in int64 (we will
# insert guards accordingly.)
#
# This is guaranteed to be FX traceable with OpOverloads only.
class TensorReferenceAnalysis:
# NB: This is actually dead, because with Proxy tracing the factory
# function isn't traced correctly. Here for completeness.
@staticmethod
def constant(c, dtype):
d: Union[int, float, bool]
if dtype is torch.int64:
d = int(c)
elif dtype is torch.double:
d = float(c)
elif dtype is torch.bool:
d = bool(c)
else:
raise AssertionError(f"unrecognized dtype {dtype}")
return torch.ops.aten.scalar_tensor.default(d, dtype=dtype)
@staticmethod
def or_(a, b):
return torch.ops.aten.logical_or.default(a, b)
@staticmethod
def and_(a, b):
return torch.ops.aten.logical_and.default(a, b)
@staticmethod
def bitwise_and(a, b):
return torch.ops.aten.bitwise_and(a, b)
@staticmethod
def bitwise_or(a, b):
return torch.ops.aten.bitwise_or(a, b)
@staticmethod
def eq(a, b):
return torch.ops.aten.eq.Tensor(a, b)
@classmethod
def ne(cls, a, b):
return torch.ops.aten.ne.Tensor(a, b)
@staticmethod
def lt(a, b):
return torch.ops.aten.lt.Tensor(a, b)
@staticmethod
def gt(a, b):
return torch.ops.aten.gt.Tensor(a, b)
@staticmethod
def le(a, b):
return torch.ops.aten.le.Tensor(a, b)
@staticmethod
def ge(a, b):
return torch.ops.aten.ge.Tensor(a, b)
@staticmethod
def not_(a):
return torch.ops.aten.logical_not.default(a)
@staticmethod
def reciprocal(x):
return torch.ops.aten.reciprocal.default(x)
@staticmethod
def square(x):
# TODO: maybe composite implicit autograd doesn't work here?
return torch.ops.aten.square.default(x)
@staticmethod
def trunc_to_int(x, dtype):
return _to_dtype(torch.ops.aten.trunc.default(x), dtype)
@staticmethod
def ceil_to_int(x, dtype):
return _to_dtype(torch.ops.aten.ceil.default(x), dtype)
@staticmethod
def floor_to_int(x, dtype):
return _to_dtype(torch.ops.aten.floor.default(x), dtype)
@staticmethod
def floor(x):
return torch.ops.aten.floor.default(x)
@staticmethod
def ceil(x):
return torch.ops.aten.ceil.default(x)
@staticmethod
def to_dtype(x, dtype):
return _to_dtype(x, dtype)
@staticmethod
def mod(x, y):
# TODO: https://github.com/pytorch/pytorch/pull/133654
raise NotImplementedError(
"no C-style modulus operation available from frontend atm"
)
@staticmethod
def abs(x):
return torch.ops.aten.abs.default(x)
@staticmethod
def neg(x):
return torch.ops.aten.neg.default(x)
@staticmethod
def truediv(a, b):
return torch.ops.aten.true_divide.Tensor(a, b)
@staticmethod
def int_truediv(a, b):
raise NotImplementedError(
"Python int truediv difficult to implement in PyTorch atm"
)
# TODO: This is wrong, CPython has a custom implementation of true
# division that results in higher precision when the floats are
# sufficiently large. Short term fix: add a guard here
return torch.ops.aten.true_divide.default(
_to_dtype(a, torch.float64), _to_dtype(b, torch.float64)
)
@staticmethod
def floordiv(a, b):
return torch.ops.aten.div.Tensor_mode(a, b, rounding_mode="floor")
@staticmethod
def truncdiv(a, b):
raise NotImplementedError(
"no C-style truncdiv operation available from frontend atm"
)
@staticmethod
def add(a, b):
return torch.ops.aten.add.Tensor(a, b)
@staticmethod
def mul(a, b):
return torch.ops.aten.mul.Tensor(a, b)
@staticmethod
def sub(a, b):
return torch.ops.aten.sub.Tensor(a, b)
@staticmethod
def exp(x):
return torch.ops.aten.exp.default(x)
@staticmethod
def log(x):
return torch.ops.aten.log.default(x)
@staticmethod
def log2(x):
return torch.ops.aten.log2.default(x)
@staticmethod
def sqrt(x):
return torch.ops.aten.sqrt.default(x)
@staticmethod
def sin(x):
return torch.ops.aten.sin.default(x)
@staticmethod
def cos(x):
return torch.ops.aten.cos.default(x)
@staticmethod
def tanh(x):
return torch.ops.aten.tanh.default(x)
@staticmethod
def sinh(x):
return torch.ops.aten.sinh.default(x)
@staticmethod
def cosh(x):
return torch.ops.aten.cosh.default(x)
@staticmethod
def tan(x):
return torch.ops.aten.tan.default(x)
@staticmethod
def acos(x):
return torch.ops.aten.acos.default(x)
@staticmethod
def atan(x):
return torch.ops.aten.atan.default(x)
@staticmethod
def asin(x):
return torch.ops.aten.asin.default(x)
@staticmethod
def pow(a, b):
return torch.ops.aten.pow.Tensor_Tensor(a, b)
@staticmethod
def pow_by_natural(a, b):
# NB: pow handles int x int fine
return torch.ops.aten.pow.Tensor_Tensor(a, b)
@staticmethod
def minimum(a, b):
return torch.ops.aten.minimum.default(a, b)
@staticmethod
def maximum(a, b):
return torch.ops.aten.maximum.default(a, b)
@staticmethod
def round_to_int(a, dtype):
return torch.ops.aten.round.default(a)
@staticmethod
def round_decimal(a, b):
raise NotImplementedError(
"round decimal doesn't support Tensor second argument atm"
)
# return torch.ops.aten.round.decimals(a, b)
```
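A brief sketch contrasting the three analyses above (a sketch only; the printed forms in the comments are indicative): the same handler name yields a sympy expression, a plain Python value, or an ATen call depending on the class.
```py
# Sketch only.
import sympy
import torch

from torch.utils._sympy.reference import (
    PythonReferenceAnalysis,
    ReferenceAnalysis,
    TensorReferenceAnalysis,
)

x = sympy.Symbol("x", integer=True)

print(ReferenceAnalysis.maximum(x, sympy.Integer(3)))  # symbolic, e.g. Max(3, x)
print(PythonReferenceAnalysis.maximum(4, 3))           # plain int: 4
print(TensorReferenceAnalysis.maximum(torch.tensor(4), torch.tensor(3)))  # tensor(4)
```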
|
===========================================================================================================================
SOURCE CODE FILE: singleton_int.py
LINES: 1
SIZE: 2.99 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_sympy\singleton_int.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import sympy
from sympy.multipledispatch import dispatch
__all__ = ["SingletonInt"]
class SingletonInt(sympy.AtomicExpr):
# This is probably not super important unless we are in multiple dispatch
# situations with other more exotic Expr types.
_op_priority = 99999
def __new__(cls, *args, coeff=None, **kwargs):
instance = super().__new__(cls, *args, **kwargs)
return instance
# The semantics of this class should match that of NestedIntSymNodeImpl in
# c10/core/NestedIntSymNodeImpl.h
def __init__(self, val, *, coeff=1):
self._val = val
self._coeff = coeff
super().__init__()
# See NOTE [ Inequalities with nested int ]
def _eval_Eq(self, other):
if (
isinstance(other, SingletonInt)
and other._val == self._val
and self._coeff == other._coeff
):
return sympy.true
else:
return sympy.false
# This is necessary so that calling expr.free_symbols on exprs that contain
# this Singleton does not error
@property
def free_symbols(self):
return set()
def __mul__(self, other):
if isinstance(other, SingletonInt):
raise ValueError(
"SingletonInt cannot be multiplied by another SingletonInt"
)
return SingletonInt(self._val, coeff=self._coeff * other)
def __rmul__(self, other):
if isinstance(other, SingletonInt):
raise ValueError(
"SingletonInt cannot be multiplied by another SingletonInt"
)
return SingletonInt(self._val, coeff=self._coeff * other)
# Make sure we promptly raise an error instead of falling back to building
# an expression tree. There are probably more ops, how can we be exhaustive?
def __add__(self, other):
raise NotImplementedError("NYI")
def __sub__(self, other):
raise NotImplementedError("NYI")
def __truediv__(self, other):
raise NotImplementedError("NYI")
def __floordiv__(self, other):
raise NotImplementedError("NYI")
def __mod__(self, other):
raise NotImplementedError("NYI")
# See NOTE [ Inequalities with nested int ]
@dispatch(sympy.Integer, SingletonInt)
def _eval_is_ge(a, b):
if a < 2:
return sympy.false
raise ValueError("Symbolic SingletonInt: Relation is indeterminate")
@dispatch(SingletonInt, sympy.Integer) # type: ignore[no-redef]
def _eval_is_ge(a, b): # noqa: F811
if b <= 2:
return sympy.true
raise ValueError("Symbolic SingletonInt: Relation is indeterminate")
@dispatch(SingletonInt, SingletonInt) # type: ignore[no-redef]
def _eval_is_ge(a, b): # noqa: F811
if a._val == b._val:
if a._coeff >= b._coeff:
return sympy.true
else:
return sympy.false
raise ValueError("Symbolic SingletonInt: Relation is indeterminate")
```
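A tiny sketch of `SingletonInt` usage (illustrative only; it pokes at the private `_coeff` and `_eval_Eq` members shown above).
```py
# Sketch only.
import sympy

from torch.utils._sympy.singleton_int import SingletonInt

j0 = SingletonInt(0, coeff=1)

# Scaling by a plain integer is the one arithmetic operation that is allowed.
assert (j0 * 2)._coeff == 2

# Equality compares both the id (_val) and the coefficient.
assert j0._eval_Eq(SingletonInt(0, coeff=1)) is sympy.true
assert j0._eval_Eq(SingletonInt(1, coeff=1)) is sympy.false

# It deliberately exposes no free symbols.
assert j0.free_symbols == set()
```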
|
===================================================================================================================
SOURCE CODE FILE: solve.py
LINES: 1
SIZE: 6.53 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_sympy\solve.py
ENCODING: utf-8
```py
import logging
from typing import Optional
import sympy
from torch.utils._sympy.functions import FloorDiv
log = logging.getLogger(__name__)
_MIRROR_REL_OP: dict[type[sympy.Basic], type[sympy.Rel]] = {
sympy.Eq: sympy.Eq,
sympy.Ne: sympy.Ne,
sympy.Ge: sympy.Le,
sympy.Gt: sympy.Lt,
sympy.Le: sympy.Ge,
sympy.Lt: sympy.Gt,
}
INEQUALITY_TYPES = (sympy.Gt, sympy.Ge, sympy.Lt, sympy.Le)
def mirror_rel_op(type: type) -> Optional[type[sympy.Rel]]:
return _MIRROR_REL_OP.get(type, None)
# Tries to simplify 'expr', so as to leave only 'thing' in the left-hand side.
#
# Returns a tuple of:
# 1. The simplified expression
# 2. The expression on the right-hand side
#
# Returns 'None' if it can't reach a state where the only thing in the left
# hand side is 'thing'.
#
# 'trials': number of times 'try_solve' will try to isolate 'thing' to the
# left-hand side.
#
# 'floordiv_inequality': flag to enable conversion of 'FloorDiv' into
# inequalities.
def try_solve(
expr: sympy.Basic,
thing: sympy.Basic,
trials: int = 5,
floordiv_inequality: bool = True,
) -> Optional[tuple[sympy.Rel, sympy.Expr]]:
mirror = mirror_rel_op(type(expr))
# Ignore unsupported expressions:
# - Those that are not relational operations
# - Those that don't have a mirror (just avoiding unexpected classes)
if not isinstance(expr, sympy.Rel) or mirror is None:
log.debug("expression with unsupported type: %s", type(expr))
return None
lhs_has_thing = expr.lhs.has(thing)
rhs_has_thing = expr.rhs.has(thing)
# Give up when 'thing' appears on both sides of the relational expression.
    # That is because, as is, we assume the thing we are trying to isolate
    # appears on exactly one side of the expression.
if lhs_has_thing and rhs_has_thing:
log.debug("thing (%s) found in both sides of expression: %s", thing, expr)
return None
# Try considering both LHS and RHS by mirroring the original expression:
# a < b ==> b > a
expressions = []
# Add each version of 'expr' if 'thing' is in its left-hand side.
if lhs_has_thing:
expressions.append(expr)
if rhs_has_thing:
expressions.append(mirror(expr.rhs, expr.lhs))
for e in expressions:
if e is None:
continue
assert isinstance(e, sympy.Rel)
for _ in range(trials):
trial = _try_isolate_lhs(e, thing, floordiv_inequality=floordiv_inequality)
# Stop if there was no change in this trial.
if trial == e:
break
e = trial # type: ignore[assignment]
# Return if we were able to isolate 'thing' on the left-hand side.
if isinstance(e, sympy.Rel) and e.lhs == thing:
log.debug("solved: %s ---> %s", expr, e)
return e, e.rhs
return None
def _try_isolate_lhs(
e: sympy.Basic, thing: sympy.Basic, floordiv_inequality: bool
) -> sympy.Basic:
op = type(e)
if isinstance(e, sympy.Rel):
# Move any constants in the left-hand side to the right-hand side.
lhs_not_thing = (
sum(a for a in e.lhs.args if not a.has(thing))
if isinstance(e.lhs, sympy.Add)
else 0
)
e = op(e.lhs - lhs_not_thing, e.rhs - lhs_not_thing) # type: ignore[attr-defined]
# Divide both sides by the factors that don't contain thing.
if isinstance(e, sympy.Rel) and isinstance(e.lhs, sympy.Mul):
lhs, rhs = e.args
other = sympy.Mul(*[a for a in lhs.args if not a.has(thing)])
# If we can't tell whether 'other' is negative or positive, we do nothing.
        # That is because we don't know whether we have to mirror the operation or not.
# We also divide only when we know 'rhs' is not zero.
if not (isinstance(e, INEQUALITY_TYPES) and other.is_negative is None) and not (
not isinstance(e, INEQUALITY_TYPES) and rhs.is_zero
):
# Divide both sides by 'other'.
lhs = lhs / other
rhs = rhs / other
# If 'e' is an inequality and 'other' is negative, we have to
# mirror the expression.
if isinstance(e, INEQUALITY_TYPES) and other.is_negative:
op = mirror_rel_op(op) # type: ignore[assignment]
assert op is not None
e = op(lhs, rhs)
################################################################################
# left-hand side is FloorDiv
################################################################################
#
# Given the expression: a // b op c
# where 'op' is a relational operation, these rules only work if:
# - b > 0
# - c is an integer
if (
floordiv_inequality
and isinstance(e, sympy.Rel)
and isinstance(e.lhs, FloorDiv)
and e.lhs.divisor.is_positive
and e.rhs.is_integer
):
# a // b == expr
# => a >= (b * expr) and a < (b * (expr + 1))
if isinstance(e, sympy.Eq):
numerator, denominator = e.lhs.args
return sympy.And(
sympy.Ge(numerator, (e.rhs * denominator)), # type: ignore[arg-type]
sympy.Lt(numerator, ((e.rhs + 1) * denominator)), # type: ignore[arg-type]
)
# a // b != expr
# => a < (b * expr) or a >= (b * (expr + 1))
if isinstance(e, sympy.Ne):
numerator, denominator = e.lhs.args
return sympy.Or(
sympy.Lt(numerator, (e.rhs * denominator)), # type: ignore[arg-type]
sympy.Ge(numerator, ((e.rhs + 1) * denominator)), # type: ignore[arg-type]
)
# The transformations below only work if b is positive.
# Note: we only have this information for constants.
# a // b > expr => a >= b * (expr + 1)
# a // b >= expr => a >= b * expr
if isinstance(e, (sympy.Gt, sympy.Ge)):
quotient = e.rhs if isinstance(e, sympy.Ge) else (e.rhs + 1) # type: ignore[arg-type]
return sympy.Ge(e.lhs.args[0], (quotient * e.lhs.args[1])) # type: ignore[arg-type]
# a // b < expr => a < b * expr
# a // b <= expr => a < b * (expr + 1)
if isinstance(e, (sympy.Lt, sympy.Le)):
quotient = e.rhs if isinstance(e, sympy.Lt) else (e.rhs + 1) # type: ignore[arg-type]
return sympy.Lt(e.lhs.args[0], (quotient * e.lhs.args[1])) # type: ignore[arg-type]
return e
```
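A tiny sketch of `try_solve` (assuming the documented signature above): it rearranges a relational expression until the target is alone on the left-hand side, and returns the rewritten relation together with its right-hand side.
```py
# Sketch only.
import sympy

from torch.utils._sympy.solve import try_solve

x = sympy.Symbol("x", positive=True, integer=True)

# 2*x == 10  ->  x == 5
print(try_solve(sympy.Eq(2 * x, 10), x))  # (Eq(x, 5), 5)

# A target on the right-hand side is handled by mirroring: 10 < x  ->  x > 10
print(try_solve(sympy.Lt(10, x), x))      # (x > 10, 10)
```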
|
====================================================================================================================
SOURCE CODE FILE: symbol.py
LINES: 1
SIZE: 3.73 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_sympy\symbol.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""
This file contains canonical definitions for our symbol naming conventions,
across torch.fx.experimental.symbolic_shapes and torch._inductor. The
intention is:
1. To make it easy to grep for all the sites where we use a prefix
2. Make it possible to easily tell if we can introduce a new prefix without
introducing a conflict
You can occasionally test if prefixes have been hardcoded by renaming prefixes
in this file and seeing what breaks.
"""
from collections.abc import Iterable
from enum import auto, Enum
from typing import Union
import sympy
class SymT(Enum):
SIZE = auto()
FLOAT = auto()
UNBACKED_INT = auto()
UNBACKED_FLOAT = auto()
# Inductor: The intermediates in inner_fn tmp0, one generated per ops call.
# If one of these shows up in an indexing expression, that means an
# indirect load is happening.
TMP = auto()
# Inductor: Placeholder variable that is later replaced with TMP
INDIRECT = auto()
# Inductor: Some size expressions are replaced with a precomputed size ps0
# which is computed host side, and then directly reused in the kernel, so
# we don't repeatedly recompute it on device.
PRECOMPUTED_SIZE = auto()
    # Inductor: An indexing variable i0 in loops IR which ranges over a
    # non-reduced dim in the loop
INDEX = auto()
    # Inductor: Reduction indexing variables (r0, r1) in loops IR which range over
    # the reduced dim(s) in the loop
R0_INDEX = auto()
R1_INDEX = auto()
# Inductor: In templated kernels torch._inductor.kernel, we have a hook to
# store the final output and append epilogue fusions. To do this, we must
# know what the indexes the outputs range over. NB: These will also
# advertise as INDEX, this is... probably OK?
TEMPLATE_INDEX = auto()
    # Inductor: iteration domain for blockIdx.x/blockIdx.y/blockIdx.z
XBLOCK = auto()
YBLOCK = auto()
ZBLOCK = auto()
# Inductor: this is used solely for dynamic_reshape_indexer
VIEW = auto()
# Alternate (non-modular) indexing used in halide kernels
HALIDE = auto()
# Invariant: there must not be a prefix which is a prefix of another prefix,
# as this introduces ambiguity
prefix_str = {
SymT.SIZE: "s", # integer
SymT.UNBACKED_INT: "u", # integer
# Prefix z here is chosen to avoid false aliasing in symbol_is_type test
# DO NOT add a "z" type. You also need to avoid conflicts on these
# prefixes but this is somewhat easier to manage
SymT.FLOAT: "zf",
SymT.UNBACKED_FLOAT: "zuf",
SymT.TMP: "tmp",
SymT.PRECOMPUTED_SIZE: "ps",
SymT.INDEX: "i",
SymT.R0_INDEX: "r0_",
SymT.R1_INDEX: "r1_",
SymT.TEMPLATE_INDEX: "idx",
SymT.XBLOCK: "x",
SymT.YBLOCK: "y",
SymT.ZBLOCK: "z",
SymT.INDIRECT: "indirect", # false aliasing?
SymT.VIEW: "view",
SymT.HALIDE: "h",
}
def make_symbol(prefix: SymT, idx: int, **kwargs) -> sympy.Symbol:
# TODO: maybe put the assumptions here directly
return sympy.Symbol(f"{prefix_str[prefix]}{idx}", **kwargs)
# This type is a little wider than it should be, because free_symbols says
# that it contains Basic, rather than Symbol
def symbol_is_type(sym: sympy.Basic, prefix: Union[SymT, Iterable[SymT]]) -> bool:
assert isinstance(sym, sympy.Symbol)
name_str = sym.name.lower() # Match capitalized names like XBLOCK, RBLOCK
if isinstance(prefix, SymT):
return name_str.startswith(prefix_str[prefix])
else:
return name_str.startswith(tuple(prefix_str[p] for p in prefix))
def free_symbol_is_type(e: sympy.Expr, prefix: Union[SymT, Iterable[SymT]]) -> bool:
return any(symbol_is_type(v, prefix) for v in e.free_symbols)
```
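A small sketch of the helpers above (assuming the current prefix table): `make_symbol` builds a prefixed `sympy.Symbol`, and `symbol_is_type`/`free_symbol_is_type` classify symbols by that prefix.
```py
# Sketch only.
from torch.utils._sympy.symbol import (
    SymT,
    free_symbol_is_type,
    make_symbol,
    symbol_is_type,
)

s0 = make_symbol(SymT.SIZE, 0, integer=True)  # sympy.Symbol("s0", integer=True)

assert symbol_is_type(s0, SymT.SIZE)
assert not symbol_is_type(s0, SymT.FLOAT)  # "s0" does not start with "zf"
assert free_symbol_is_type(s0 + 1, (SymT.SIZE, SymT.UNBACKED_INT))
```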
|
==========================================================================================================================
SOURCE CODE FILE: value_ranges.py
LINES: 3
SIZE: 35.42 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_sympy\value_ranges.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from __future__ import annotations
import dataclasses
import functools
import itertools
import logging
import math
import operator
from typing import (
Callable,
Generic,
Optional,
overload,
SupportsFloat,
TYPE_CHECKING,
TypeVar,
Union,
)
from typing_extensions import TypeGuard
import sympy
from sympy.logic.boolalg import Boolean as SympyBoolean, BooleanAtom
import torch
from torch._logging import LazyString
from torch._prims_common import dtype_to_type
from .functions import (
_keep_float,
FloatTrueDiv,
FloorDiv,
IntTrueDiv,
OpaqueUnaryFn_exp,
OpaqueUnaryFn_log,
OpaqueUnaryFn_log2,
OpaqueUnaryFn_sqrt,
PowByNatural,
RoundDecimal,
RoundToInt,
safe_pow,
ToFloat,
TruncToFloat,
TruncToInt,
)
from .interp import sympy_interp
from .numbers import int_oo, IntInfinity, NegativeIntInfinity
log = logging.getLogger(__name__)
__all__ = ["ValueRanges", "bound_sympy"]
_T = TypeVar("_T", sympy.Expr, SympyBoolean)
class ValueRangeError(RuntimeError):
pass
# Like sympify, but supports less stuff, and also ensures that direct
# sympy expressions don't have free variables
def simple_sympify(e):
if isinstance(e, bool):
return sympy.true if e else sympy.false
elif isinstance(e, int):
return sympy.Integer(e)
elif isinstance(e, float):
# infinity is special; we use it to bracket integers as well
if math.isinf(e):
return sympy.oo if e > 0 else -sympy.oo
return sympy.Float(e)
elif isinstance(e, sympy.Expr):
assert e.is_number, e
# NaNs can occur when doing things like 0 * sympy.oo, but it is better
# if the operator notices this and takes care of it, because sometimes
# the NaN is inappropriate (for example, for ints, the [-oo, oo] range
# should go to zero when multiplied with [0, 0])
assert e != sympy.nan
return e
elif isinstance(e, BooleanAtom):
return e
else:
raise AssertionError(f"not simple sympy type {type(e)}: {e}")
# Sympy atomics only. Unlike <=, it also works on Sympy bools.
def sympy_generic_le(lower, upper):
if isinstance(lower, sympy.Expr):
assert isinstance(upper, sympy.Expr)
# instead of lower <= upper, we do upper >= lower since upper is mostly int_oo
# and we have better code paths there.
return upper >= lower
else:
# only negative condition is True > False
assert isinstance(lower, SympyBoolean) and isinstance(upper, SympyBoolean), (
lower,
upper,
)
return not (lower and not upper)
def vr_is_bool(vr: ValueRanges[_T]) -> TypeGuard[ValueRanges[SympyBoolean]]:
return vr.is_bool
def vr_is_expr(vr: ValueRanges[_T]) -> TypeGuard[ValueRanges[sympy.Expr]]:
return not vr.is_bool
ExprIn = Union[int, float, sympy.Expr]
BoolIn = Union[bool, SympyBoolean]
AllIn = Union[ExprIn, BoolIn]
ExprFn = Callable[[sympy.Expr], sympy.Expr]
ExprFn2 = Callable[[sympy.Expr, sympy.Expr], sympy.Expr]
BoolFn = Callable[[SympyBoolean], SympyBoolean]
BoolFn2 = Callable[[SympyBoolean, SympyBoolean], SympyBoolean]
AllFn = Union[ExprFn, BoolFn]
AllFn2 = Union[ExprFn2, BoolFn2]
@dataclasses.dataclass(frozen=True)
class ValueRanges(Generic[_T]):
if TYPE_CHECKING:
# ruff doesn't understand circular references but mypy does
ExprVR = ValueRanges[sympy.Expr] # noqa: F821
BoolVR = ValueRanges[SympyBoolean] # noqa: F821
AllVR = Union[ExprVR, BoolVR]
# Although the type signature here suggests you can pass any
# sympy expression, in practice the analysis here only works
# with constant sympy expressions
lower: _T
upper: _T
is_bool: bool
is_int: bool
is_float: bool
def __repr__(self) -> str:
return f"VR[{self.lower}, {self.upper}]"
@overload
def __init__(
self: ValueRanges[sympy.Expr],
lower: ExprIn,
upper: ExprIn,
) -> None:
...
@overload
def __init__( # type: ignore[misc]
self: ValueRanges[SympyBoolean],
lower: BoolIn,
upper: BoolIn,
) -> None:
...
def __init__(self, lower: AllIn, upper: AllIn) -> None:
lower = simple_sympify(lower)
upper = simple_sympify(upper)
# TODO: when the bounds have free variables, this may be
# nontrivial to actually verify
try:
if not sympy_generic_le(lower, upper):
raise ValueRangeError(f"Invalid ranges [{lower}:{upper}]")
except TypeError as e:
raise TypeError(f"Could not compare {lower} <= {upper}") from e
is_bool_lower = isinstance(lower, SympyBoolean)
is_bool_upper = isinstance(upper, SympyBoolean)
assert is_bool_lower == is_bool_upper, (lower, upper)
# Warning: is_int/is_float is best effort. We do pretty well in
# Dynamo, but in Inductor these attributes are often wrong because we
# are not very rigorous in dtype analysis. This is also why we need
# the flexible analysis for is_int: sometimes a sympy.oo pops in for
# an integer bound. I would /like/ for us not to do this, but it's
# too hard to push the invariant through right now.
if isinstance(lower, sympy.Integer) and upper == sympy.oo:
upper = int_oo
if isinstance(upper, sympy.Integer) and lower == -sympy.oo:
lower = -int_oo
# NB: [-int_oo, -int_oo] and [int_oo, int_oo] are allowed
integer_types = (sympy.Integer, NegativeIntInfinity, IntInfinity)
is_int_lower = isinstance(lower, integer_types)
is_int_upper = isinstance(upper, integer_types)
# Because this is a frozen class
object.__setattr__(self, "lower", lower)
object.__setattr__(self, "upper", upper)
# Unlike bool/int in Python, we don't report bools are ints
#
# NB: is_bool_lower == is_bool_upper, so we only need to check one
object.__setattr__(self, "is_bool", is_bool_lower)
object.__setattr__(
self,
"is_int",
not self.is_bool and is_int_lower and is_int_upper,
)
"""
# This assert is just impossible right now, too many sympy bugs
if self.is_int:
# NB: sympy will sometimes randomly lose the float-ness of zero,
# so we also need to account for that in the assertion here.
# See also https://github.com/sympy/sympy/issues/26620
assert isinstance(lower, sympy.Integer) or lower in [-sympy.oo, 0], (
lower,
upper,
)
assert isinstance(upper, sympy.Integer) or upper in [sympy.oo, 0], (lower, upper)
"""
# NB: [-oo, oo] always advertises as float!
object.__setattr__(self, "is_float", not self.is_bool and not self.is_int)
assert self.is_bool or self.is_int or self.is_float, (lower, upper)
def boolify(self) -> ValueRanges[SympyBoolean]:
if vr_is_bool(self):
return self
elif self == ValueRanges.unknown():
return ValueRanges.unknown_bool()
else:
raise AssertionError(f"not bool like {self}")
def __contains__(self, x: AllIn) -> bool:
return ValueRanges.wrap(x).issubset(self)
def issubset(self, other):
if other is self.unknown_int():
return True
return sympy_generic_le(other.lower, self.lower) and sympy_generic_le(
self.upper, other.upper
)
def tighten(self, other) -> ValueRanges:
"""Given two ValueRanges, returns their intersection"""
return self & other
# Intersection
@overload
def __and__(
self: ValueRanges[sympy.Expr],
other: ValueRanges[sympy.Expr],
) -> ValueRanges[sympy.Expr]:
...
@overload
def __and__( # type: ignore[misc]
self: ValueRanges[SympyBoolean],
other: ValueRanges[SympyBoolean],
) -> ValueRanges[SympyBoolean]:
...
def __and__(self: AllVR, other: AllVR) -> AllVR:
if other in (ValueRanges.unknown(), ValueRanges.unknown_int()):
return self
if self in (ValueRanges.unknown(), ValueRanges.unknown_int()):
return other
assert self.is_bool == other.is_bool, (self, other)
assert self.is_int == other.is_int, (self, other)
assert self.is_float == other.is_float, (self, other)
if self.is_bool:
return ValueRanges(
sympy.Or(self.lower, other.lower), sympy.And(self.upper, other.upper)
)
else:
return ValueRanges(
sympy.Max(self.lower, other.lower), sympy.Min(self.upper, other.upper)
)
# Union
@overload
def __or__(
self: ValueRanges[sympy.Expr],
other: ValueRanges[sympy.Expr],
) -> ValueRanges[sympy.Expr]:
...
@overload
def __or__( # type: ignore[misc]
self: ValueRanges[SympyBoolean],
other: ValueRanges[SympyBoolean],
) -> ValueRanges[SympyBoolean]:
...
def __or__(self: AllVR, other: AllVR) -> AllVR:
if ValueRanges.unknown() in (self, other):
return ValueRanges.unknown()
assert self.is_bool == other.is_bool, (self, other)
assert self.is_int == other.is_int, (self, other)
assert self.is_float == other.is_float, (self, other)
if self.is_bool:
return ValueRanges(
sympy.And(self.lower, other.lower), sympy.Or(self.upper, other.upper)
)
else:
return ValueRanges(
sympy.Min(self.lower, other.lower), sympy.Max(self.upper, other.upper)
)
def is_singleton(self) -> bool:
return self.lower == self.upper
@staticmethod
@functools.cache
def unknown() -> ValueRanges[sympy.Expr]:
return ValueRanges(-sympy.oo, sympy.oo)
@staticmethod
@functools.cache
def unknown_int() -> ValueRanges[sympy.Expr]:
return ValueRanges(-int_oo, int_oo)
@staticmethod
@functools.cache
def unknown_bool() -> ValueRanges[SympyBoolean]:
return ValueRanges(sympy.false, sympy.true)
@overload
@staticmethod
# work around the fact that bool and int overlap
def wrap(arg: Union[ExprIn, ExprVR]) -> ExprVR: # type: ignore[overload-overlap]
...
@overload
@staticmethod
def wrap(arg: Union[BoolIn, BoolVR]) -> BoolVR: # type: ignore[misc]
...
@staticmethod
def wrap(arg: Union[AllIn, AllVR]) -> AllVR:
if isinstance(arg, ValueRanges):
return arg
if isinstance(arg, float) and math.isnan(arg):
return ValueRanges.unknown()
# arg is either ExprIn or BoolIn, but we don't know it here
return ValueRanges(arg, arg) # type: ignore[arg-type]
@staticmethod
def increasing_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR:
"""Increasing: x <= y => f(x) <= f(y)."""
x = ValueRanges.wrap(x)
return ValueRanges(fn(x.lower), fn(x.upper))
@overload
@staticmethod
def decreasing_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR:
...
@overload
@staticmethod
def decreasing_map(x: Union[BoolIn, BoolVR], fn: BoolFn) -> BoolVR: # type: ignore[misc]
...
@staticmethod
def decreasing_map(x: Union[AllIn, AllVR], fn: AllFn) -> AllVR:
"""Decreasing: x <= y => f(x) >= f(y)."""
x = ValueRanges.wrap(x)
# consistently either Expr or Bool, but we don't know it here
return ValueRanges(fn(x.upper), fn(x.lower)) # type: ignore[arg-type]
@staticmethod
def monotone_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR:
"""It's increasing or decreasing."""
x = ValueRanges.wrap(x)
l = fn(x.lower)
u = fn(x.upper)
return ValueRanges(min(l, u), max(l, u))
@staticmethod
def convex_min_zero_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR:
"""Fn is convex and has a minimum at 0."""
x = ValueRanges.wrap(x)
if 0 in x:
upper = max(fn(x.lower), fn(x.upper))
upper = simple_sympify(upper)
if isinstance(upper, sympy.Float) or upper == sympy.oo:
return ValueRanges(0.0, upper)
return ValueRanges(0, upper)
return ValueRanges.monotone_map(x, fn)
@overload
@staticmethod
def coordinatewise_increasing_map(
x: Union[ExprIn, ExprVR],
y: Union[ExprIn, ExprVR],
fn: ExprFn2,
) -> ExprVR:
...
@overload
@staticmethod
def coordinatewise_increasing_map( # type: ignore[misc]
x: Union[BoolIn, BoolVR],
y: Union[BoolIn, BoolVR],
fn: BoolFn2,
) -> BoolVR:
...
@staticmethod
def coordinatewise_increasing_map(
x: Union[AllIn, AllVR],
y: Union[AllIn, AllVR],
fn: AllFn2,
) -> AllVR:
"""
It's increasing on each coordinate.
Mathematically:
For every 1 <= i <= n and x_i <= y_i we have that
        f(x1, ..., xi, ..., xn) <= f(x1, ..., yi, ..., xn)
"""
x, y = ValueRanges.wrap(x), ValueRanges.wrap(y)
return ValueRanges(
fn(x.lower, y.lower), # type: ignore[arg-type]
fn(x.upper, y.upper), # type: ignore[arg-type]
)
@classmethod
def coordinatewise_monotone_map(cls, x, y, fn):
"""It's increasing or decreasing on each coordinate."""
x, y = cls.wrap(x), cls.wrap(y)
products = [
fn(a, b)
for a, b in itertools.product([x.lower, x.upper], [y.lower, y.upper])
]
return ValueRanges(min(products), max(products))
class SymPyValueRangeAnalysis:
"""
It gives bounds on a SymPy operator given bounds on its arguments
See the function `bound_sympy` for a function that applies this logic to a full SymPy expression
"""
@staticmethod
def constant(value, dtype):
if isinstance(value, ValueRanges):
assert value.is_singleton()
value = value.lower
# NB: value is NOT a sympy expression, it's a constant!
is_python = isinstance(value, (int, float, bool))
assert is_python or isinstance(
value, (BooleanAtom, sympy.Integer, sympy.Number)
)
# using nan makes subsequent computation throw, and for the purposes of optimization
        # returning [-math.inf, math.inf] is equivalent to giving up
if isinstance(value, SupportsFloat) and math.isnan(value):
if dtype == torch.bool:
return ValueRanges.unknown_bool()
elif dtype.is_floating_point:
return ValueRanges.unknown()
else:
return ValueRanges.unknown_int()
if is_python:
type_ = dtype_to_type(dtype)
value = type_(value)
else:
# We do a type check on a best-effort basis
# We don't want to force a cast to sympy.Float if the value is Rational to avoid losing precision
if dtype == torch.bool:
assert isinstance(value, BooleanAtom)
elif dtype.is_floating_point:
assert not value.is_finite or value.is_real
else:
# dtype is intXX
assert value.is_integer
r = ValueRanges.wrap(value)
return r
@staticmethod
def to_dtype(a, dtype, src_dtype=None):
if dtype == torch.float64:
return ValueRanges.increasing_map(a, ToFloat)
elif dtype == torch.bool:
return ValueRanges.unknown_bool()
elif not dtype.is_floating_point:
return ValueRanges.unknown_int()
return ValueRanges.unknown()
@staticmethod
def trunc_to_int(a, dtype):
return ValueRanges.increasing_map(a, TruncToInt)
@staticmethod
def not_(a):
a = ValueRanges.wrap(a)
a = a.boolify()
assert a.is_bool
return ValueRanges.decreasing_map(a, sympy.Not)
@staticmethod
def or_(a, b):
return ValueRanges.coordinatewise_increasing_map(a, b, sympy.Or)
@staticmethod
def and_(a, b):
return ValueRanges.coordinatewise_increasing_map(a, b, sympy.And)
@staticmethod
def _bool_to_int(x):
if x.is_singleton():
return ValueRanges.wrap(sympy.Integer(1 if x.lower else 0))
else:
return ValueRanges(sympy.Integer(0), sympy.Integer(1))
@classmethod
def bitwise_and(cls, a, b):
a, b = ValueRanges.wrap(a), ValueRanges.wrap(b)
if a.is_bool and b.is_bool:
return cls.and_(a, b)
if a.is_bool:
a = cls._bool_to_int(a)
if b.is_bool:
b = cls._bool_to_int(b)
lower = min(a.lower, b.lower)
if lower < 0 and lower != -sympy.oo and lower != -int_oo:
# If both lower bounds are negative, then bits start like
# 1...10..., so the smallest possible value is 1...101...1.
# Thus, we need to find the next smallest power of 2 (inclusive).
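            # Worked example (illustrative): lower = -5 -> int(-lower - 1) = 4,
            # (4).bit_length() == 3, so the bound becomes -(1 << 3) = -8.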
try:
lower = -(1 << int(-lower - 1).bit_length())
except Exception:
lower = -int_oo
else:
lower = 0
return ValueRanges(lower, max(a.upper, b.upper))
@classmethod
def bitwise_or(cls, a, b):
a, b = ValueRanges.wrap(a), ValueRanges.wrap(b)
if a.is_bool and b.is_bool:
return cls.or_(a, b)
if a.is_bool:
a = cls._bool_to_int(a)
if b.is_bool:
b = cls._bool_to_int(b)
upper = max(a.upper, b.upper)
if upper == 0:
upper = 0
elif upper > 0 and upper != sympy.oo and upper != int_oo:
# If both upper bounds are positive, then the largest
# possible value is 01...1, so we need to find
            # the next largest power of 2 (exclusive), minus 1
try:
upper = (1 << int(upper).bit_length()) - 1
except Exception:
upper = int_oo
elif upper < 0:
upper = -1
return ValueRanges(min(a.lower, b.lower), upper)
@staticmethod
def eq(a, b):
a = ValueRanges.wrap(a)
b = ValueRanges.wrap(b)
if a.is_singleton() and b.is_singleton() and a.lower == b.lower:
return ValueRanges.wrap(sympy.true)
elif a.lower > b.upper or b.lower > a.upper: # ranges disjoint
return ValueRanges.wrap(sympy.false)
return ValueRanges(sympy.false, sympy.true)
@classmethod
def ne(cls, a, b):
return cls.not_(cls.eq(a, b))
@classmethod
def identity(cls, a):
return ValueRanges.wrap(a)
@classmethod
def lt(cls, a, b):
a = ValueRanges.wrap(a)
b = ValueRanges.wrap(b)
assert a.is_bool == b.is_bool
if a.is_bool:
return cls.and_(cls.not_(a), b)
else:
if a.upper < b.lower:
return ValueRanges.wrap(sympy.true)
elif a.lower >= b.upper:
return ValueRanges.wrap(sympy.false)
return ValueRanges(sympy.false, sympy.true)
@classmethod
def gt(cls, a, b):
return cls.lt(b, a)
@classmethod
def le(cls, a, b):
return cls.not_(cls.gt(a, b))
@classmethod
def ge(cls, a, b):
return cls.not_(cls.lt(a, b))
@staticmethod
def add(a, b):
return ValueRanges.coordinatewise_increasing_map(
a, b, _keep_float(operator.add)
)
@classmethod
def mul(cls, a, b):
a = ValueRanges.wrap(a)
b = ValueRanges.wrap(b)
assert a.is_bool == b.is_bool
if a.is_bool:
return cls.and_(a, b)
def safe_mul(a, b):
# Make unknown() * wrap(0.0) == wrap(0.0)
if a == 0.0 or a == 0:
return a
elif b == 0.0 or b == 0:
return b
else:
return a * b
return ValueRanges.coordinatewise_monotone_map(a, b, _keep_float(safe_mul))
@staticmethod
def int_truediv(a, b):
a = ValueRanges.wrap(a)
b = ValueRanges.wrap(b)
if 0 in b or ((-int_oo in a or int_oo in a) and (-int_oo in b or int_oo in b)):
return ValueRanges.unknown()
else:
return ValueRanges.coordinatewise_monotone_map(
a, b, _keep_float(IntTrueDiv)
)
@staticmethod
def truediv(a, b):
a = ValueRanges.wrap(a)
b = ValueRanges.wrap(b)
if 0 in b or (
(-sympy.oo in a or sympy.oo in a) and (-sympy.oo in b or sympy.oo in b)
):
return ValueRanges.unknown()
else:
return ValueRanges.coordinatewise_monotone_map(
a, b, _keep_float(FloatTrueDiv)
)
@staticmethod
def floordiv(a, b):
a = ValueRanges.wrap(a)
b = ValueRanges.wrap(b)
if 0 in b:
return ValueRanges.unknown_int()
products = []
for x, y in itertools.product([a.lower, a.upper], [b.lower, b.upper]):
r = FloorDiv(x, y)
if r is sympy.nan:
products.append((sympy.sign(x) * sympy.sign(y)) * int_oo)
else:
products.append(r)
return ValueRanges(min(products), max(products))
@classmethod
def mod(cls, x, y):
x = ValueRanges.wrap(x)
y = ValueRanges.wrap(y)
# nb. We implement C semantics
def c_mod(a, b):
ret = abs(a) % abs(b)
if a < 0:
ret *= -1
return ret
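        # e.g. c_mod(-7, 3) == -1 and c_mod(7, -3) == 1, matching C's truncated-division remainder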
def c_div(a, b):
x = a / b
return sympy.Integer(x) if x.is_finite and x not in (int_oo, -int_oo) else x
if 0 in y:
return ValueRanges.unknown_int()
elif y.is_singleton():
y_val = abs(y.lower)
# If it wraps, we need to take the whole interval
# The function is locally linear if they are in the same class
if c_div(x.lower, y_val) == c_div(x.upper, y_val):
return ValueRanges.increasing_map(x, lambda u: c_mod(u, y_val))
if x.upper < 0:
# Negative case
return ValueRanges(-y_val + 1, 0)
elif x.lower > 0:
# Positive case
return ValueRanges(0, y_val - 1)
else:
# Mixed case
lower = max(-y_val + 1, x.lower)
upper = min(y_val - 1, x.upper)
return ValueRanges(lower, upper)
else:
# Too difficult, we bail out
upper = cls.abs(y).upper - 1
return ValueRanges(-upper, upper)
@classmethod
def modular_indexing(cls, a, b, c):
return cls.mod(cls.floordiv(a, b), c)
@classmethod
def is_non_overlapping_and_dense_indicator(cls, *args):
return ValueRanges.unknown_int()
@classmethod
def pow_by_natural(cls, a, b):
a = ValueRanges.wrap(a)
b = ValueRanges.wrap(b)
if a.is_singleton() and b.is_singleton():
return ValueRanges.wrap(safe_pow(a.lower, b.lower))
# NB: Exclude zero, because zero is special
elif a.lower >= 1:
# We should know that b >= 0 but we may have forgotten this fact due
# to replacements, so don't assert it, but DO clamp it to prevent
# degenerate problems
return ValueRanges.coordinatewise_increasing_map(
a, b & ValueRanges(0, int_oo), PowByNatural
)
elif b.is_singleton():
if b.lower % 2 == 0:
# x^n where n is even
return ValueRanges.convex_min_zero_map(
a, lambda x: safe_pow(x, b.lower)
)
else:
# x^n where n is odd
return ValueRanges.increasing_map(a, lambda x: safe_pow(x, b.lower))
else:
# a is potentially negative, and we don't know if the exponent is
# even or odd. So just conservatively set the upper and lower
# bound based on what the maximum absolute value could be, in both
# directions
max_base = max(a.upper, -a.lower)
return ValueRanges(
-(safe_pow(max_base, b.upper)), safe_pow(max_base, b.upper)
)
@classmethod
def pow(cls, a, b):
return ValueRanges.unknown()
# We could implement all this, but for floating point pow, is there
# really a point?
"""
a = ValueRanges.wrap(a)
b = ValueRanges.wrap(b)
# Not implemented yet. It's a bit tricky
# If you want to implement it, compute the partial derivatives of a ** b
# and check the ranges where the function is increasing / decreasing
        # Another non-tight way of doing this is noting that for a > 0, a ** b == exp(b * log(a))
        # If this second option is implemented, be careful about the types and possible infinities here and there.
if not b.is_singleton():
return ValueRanges.unknown()
b = b.lower
if a.is_singleton():
a = a.lower
r = a**b
if not r.is_finite:
return ValueRanges.unknown()
return ValueRanges.wrap(r)
if b == 0:
if not a.lower.is_finite:
return ValueRanges.unknown()
return ValueRanges.wrap(1.0)
if b < 0:
a = cls.reciprocal(a)
b = -b
if a == ValueRanges.unknown():
return ValueRanges.unknown()
# If the base is positive, then we're good, otherwise nothing's defined
if a.lower >= 0:
return ValueRanges.increasing_map(a, lambda x: x**b)
else:
return ValueRanges.unknown()
"""
@staticmethod
def reciprocal(x):
"""Needed as it's used in pow, but it won't appear on a SymPy expression"""
x = ValueRanges.wrap(x)
if 0 in x:
return ValueRanges.unknown()
else:
return ValueRanges.decreasing_map(x, lambda y: FloatTrueDiv(1.0, y)) # type: ignore[operator]
@staticmethod
def abs(x):
return ValueRanges.convex_min_zero_map(x, abs)
@staticmethod
def exp(x):
return ValueRanges.increasing_map(x, OpaqueUnaryFn_exp)
@staticmethod
def log(x):
x = ValueRanges.wrap(x)
if x.lower <= 0:
return ValueRanges.unknown()
return ValueRanges.increasing_map(x, OpaqueUnaryFn_log)
@staticmethod
def log2(x):
x = ValueRanges.wrap(x)
if x.lower <= 0:
return ValueRanges.unknown()
return ValueRanges.increasing_map(x, OpaqueUnaryFn_log2)
@classmethod
def minimum(cls, a, b):
return cls.min_or_max(a, b, sympy.Min)
@classmethod
def maximum(cls, a, b):
return cls.min_or_max(a, b, sympy.Max)
@staticmethod
def min_or_max(a, b, fn):
a = ValueRanges.wrap(a)
b = ValueRanges.wrap(b)
return ValueRanges.coordinatewise_increasing_map(a, b, fn)
@classmethod
def floor_to_int(cls, x, dtype):
return ValueRanges.increasing_map(x, sympy.functions.elementary.integers.floor)
@classmethod
def ceil_to_int(cls, x, dtype):
return ValueRanges.increasing_map(
x, sympy.functions.elementary.integers.ceiling
)
# I think these implementations are sound. The hazard here is that sympy
# will carry out the floor/ceil at too high precision and then something
# bad will happen when we convert it to float.
#
# For truncation, the implementation is clearly sound, because the desired
# target float is always exactly representable, since you're just chopping
    # off bits of the mantissa. But what about ceil/floor?
#
# The important constraint here is that we're not defining floor on
# arbitrary real numbers, only representable float numbers. So we can
# take advantage of the fact that before we reach the first
# unrepresentable integer in floating point space, we have the range of
# numbers corresponding to exponent zero: all integers, with no fractional
# amounts. floor/ceil is an identity operation in this case. In the
# range below here, representable floating point numbers are spaced
# exactly 1/2 apart, and notably, both the floor/ceil are defined floating
# point numbers. There is no "gap" as you step up to the next exponent.
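    # Illustrative check (not from the original comment): in float64, all integers up to
    # 2**53 are exactly representable, so e.g. floor(2.5) == 2.0 and ceil(2.5) == 3.0 are
    # exact results; these maps cannot introduce rounding error on representable inputs.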
@classmethod
def floor(cls, x):
return ValueRanges.increasing_map(
x, _keep_float(sympy.functions.elementary.integers.floor)
)
@classmethod
def ceil(cls, x):
return ValueRanges.increasing_map(
x, _keep_float(sympy.functions.elementary.integers.ceiling)
)
@classmethod
def round_decimal(cls, number, ndigits):
if not ndigits.is_singleton():
return ValueRanges.unknown()
ndigits = ndigits.lower
# We can't use functools.partial here since sympy doesn't support keyword arguments, but we have to bind
# the second parameter.
fn = lambda number: RoundDecimal(number, ndigits) # type: ignore[misc, assignment] # noqa: E731
return ValueRanges.increasing_map(number, fn)
@classmethod
def round_to_int(cls, number, dtype):
return ValueRanges.increasing_map(number, RoundToInt)
# It's used in some models on symints
@staticmethod
def sqrt(x):
x = ValueRanges.wrap(x)
if x.lower < 0:
return ValueRanges.unknown()
return ValueRanges.increasing_map(x, OpaqueUnaryFn_sqrt)
@staticmethod
def where(a, b, c):
b = ValueRanges.wrap(b)
c = ValueRanges.wrap(c)
a = a.boolify()
# We sometimes write unknown without specifying the type correctly
# In particular, we do that when initialising the bounds for loads in bounds.py
assert b.is_bool == c.is_bool or ValueRanges.unknown() in (b, c)
if b.is_bool:
return ValueRanges(sympy.And(b.lower, c.lower), sympy.Or(b.upper, c.upper))
else:
return ValueRanges(sympy.Min(b.lower, c.lower), sympy.Max(b.upper, c.upper))
# expr_cond_pair is used to represent a single (expr, condition) pair in piecewise.
# We just return the value range of the expression and its corresponding condition as a tuple
# and defer the analysis to piecewise
@staticmethod
def expr_cond_pair(a, b):
b = b.boolify()
return (a, b)
# piecewise function can be used to convert a SymBool to SymInt:
    # int_expr = Piecewise((1, bool_expr), (0, True)); it evaluates to 1 when bool_expr is True and 0 otherwise.
#
# ranges is a sequence of (expr_range, condition_range) pairs. The range pair is constructed in expr_cond_pair.
# The ValueRange of Piecewise is just the union of all expr ranges whose condition expr can be True.
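    # Illustrative example: piecewise((VR[1, 1], VR[False, True]), (VR[0, 0], VR[True, True]))
    # yields VR[0, 1], the union of both branch ranges, since both conditions can be True.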
@staticmethod
def piecewise(*ranges):
init_range = None
for expr_range, cond_range in ranges:
if sympy.true in cond_range:
if init_range is None:
init_range = expr_range
else:
init_range = init_range | expr_range
return init_range
@staticmethod
def cos(x):
        # TODO: We should tighten value ranges
        # If the input range spans at least a full period (width >= 2*pi), the output range is [-1, 1];
        # otherwise it can be bounded by the values at the endpoints and any interior extrema
return ValueRanges(-1.0, 1.0)
@staticmethod
def cosh(x):
return ValueRanges(0.0, sympy.oo)
"""
x = ValueRanges.wrap(x)
if x.lower > 0:
return ValueRanges.increasing_map(x, OpaqueUnaryFn_cosh)
elif x.upper < 0:
return ValueRanges.decreasing_map(x, OpaqueUnaryFn_cosh)
return ValueRanges(0.0, sympy.oo)
"""
@staticmethod
def sin(x):
# TODO: We should tighten value ranges
# See details on cos
return ValueRanges(-1.0, 1.0)
@staticmethod
def sinh(x):
# return ValueRanges.increasing_map(x, OpaqueUnaryFn_sinh)
return ValueRanges(-sympy.oo, sympy.oo)
@staticmethod
def tan(x):
return ValueRanges(-sympy.oo, sympy.oo)
@staticmethod
def tanh(x):
# return ValueRanges.increasing_map(x, OpaqueUnaryFn_tanh)
return ValueRanges(-sympy.oo, sympy.oo)
@staticmethod
def asin(x):
return ValueRanges(-sympy.oo, sympy.oo)
"""
x = ValueRanges.wrap(x)
if -1 <= x.lower and x.upper <= 1:
return ValueRanges.increasing_map(x, OpaqueUnaryFn_asinh)
return ValueRanges.unknown()
"""
@staticmethod
def acos(x):
return ValueRanges(-sympy.oo, sympy.oo)
"""
x = ValueRanges.wrap(x)
if -1 <= x.lower and x.upper <= 1:
return ValueRanges.decreasing_map(x, OpaqueUnaryFn_acos)
return ValueRanges.unknown()
"""
@staticmethod
def atan(x):
return ValueRanges(-sympy.oo, sympy.oo)
# return ValueRanges.increasing_map(x, OpaqueUnaryFn_atan)
@staticmethod
def trunc(x):
return ValueRanges.increasing_map(x, TruncToFloat)
def bound_sympy(
expr: sympy.Expr, ranges: Optional[dict[sympy.Symbol, ValueRanges]] = None
) -> ValueRanges:
log.debug(
"bound_sympy(%s)%s",
expr,
LazyString(
lambda: (
"\n"
+ "\n".join(
f" {k}: {r}" for k, r in ranges.items() if k in expr.free_symbols
)
if ranges
else ""
)
),
)
if isinstance(expr, sympy.Number):
return ValueRanges.wrap(expr)
ranges = ranges or {}
# If there's a tracing context, augment available constrained ranges.
context = torch._guards.TracingContext.try_get()
if context and context.fake_mode.shape_env:
if ranges:
ranges = {**context.fake_mode.shape_env.var_to_range, **ranges}
else:
ranges = context.fake_mode.shape_env.var_to_range
def missing_handler(s):
if s.is_integer: # type: ignore[attr-defined]
if s.is_positive: # type: ignore[attr-defined]
vr = ValueRanges(1, int_oo)
elif s.is_nonnegative: # type: ignore[attr-defined]
vr = ValueRanges(0, int_oo)
else:
vr = ValueRanges.unknown_int()
else:
# Don't bother trying very hard here
vr = ValueRanges.unknown()
return vr
return sympy_interp(
SymPyValueRangeAnalysis, ranges, expr, missing_handler=missing_handler
)
```
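A minimal usage sketch of `bound_sympy` from the file above. This is illustrative only and assumes the file is importable as `torch.utils._sympy.value_ranges` (consistent with its relative imports of `.interp` and `.numbers`):

```py
import sympy

from torch.utils._sympy.value_ranges import ValueRanges, bound_sympy

# Bound 2*s + 1 given 1 <= s <= 10. Multiplication and addition are handled by the
# coordinatewise monotone/increasing maps above, so the expected result is VR[3, 21].
s = sympy.Symbol("s", integer=True, positive=True)
vr = bound_sympy(2 * s + 1, {s: ValueRanges(1, 10)})
print(vr)  # expected: VR[3, 21]
```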
|
=============================================================================================================
SOURCE CODE FILE: _thunk.py
LINES: 1
SIZE: 0.64 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_thunk.py
ENCODING: utf-8
```py
from typing import Callable, Generic, Optional, TypeVar
R = TypeVar("R")
class Thunk(Generic[R]):
"""
A simple lazy evaluation implementation that lets you delay
execution of a function. It properly handles releasing the
function once it is forced.
"""
f: Optional[Callable[[], R]]
r: Optional[R]
__slots__ = ["f", "r"]
def __init__(self, f: Callable[[], R]):
self.f = f
self.r = None
def force(self) -> R:
if self.f is None:
return self.r # type: ignore[return-value]
self.r = self.f()
self.f = None
return self.r
```
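A brief usage sketch of `Thunk` (illustrative):

```py
from torch.utils._thunk import Thunk

def compute() -> int:
    print("computing...")
    return 42

t = Thunk(compute)   # nothing runs yet
print(t.force())     # prints "computing..." then 42
print(t.force())     # prints 42 only; the callable was released after the first force
```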
|
=================================================================================================================
SOURCE CODE FILE: _traceback.py
LINES: 1
SIZE: 10.28 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_traceback.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from types import TracebackType
from typing import Optional
import tempfile
import traceback
import contextlib
import inspect
import os.path
# This file contains utilities for ensuring dynamically compile()'d
# code fragments display their line numbers in backtraces.
#
# The constraints:
#
# - We don't have control over the user exception printer (in particular,
# we cannot assume the linecache trick will work, c.f.
# https://stackoverflow.com/q/50515651/23845 )
#
# - We don't want to create temporary files every time we compile()
# some code; file creation should happen lazily only at exception
# time. Arguably, you *should* be willing to write out your
# generated Python code to file system, but in some situations
# (esp. library code) it would violate user expectation to write
# to the file system, so we try to avoid it. In particular, we'd
# like to keep the files around, so users can open up the files
# mentioned in the trace; if the file is invisible, we want to
# avoid clogging up the filesystem.
#
# If this is not a constraint for you, there is a substantially simpler
# way to implement the functionality in this PR: instead of using
# eval/exec directly, just always write a Python file to filesystem
# and compile that.
#
# - You have control over a context where the compiled code will get
# executed, so that we can interpose while the stack is unwinding
# (otherwise, we have no way to interpose on the exception printing
# process.)
#
# There are two things you have to do to make use of the utilities here:
#
# - When you compile your source code, you must save its string source
# in its f_globals under the magic name "__compile_source__"
#
# - Before running the compiled code, enter the
# report_compile_source_on_error() context manager.
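#
# A minimal usage sketch (illustrative; the names below are hypothetical):
#
#     src = "def f():\n    raise RuntimeError('boom')\n"
#     g = {"__compile_source__": src}
#     exec(compile(src, "<string>", "exec"), g)
#     with report_compile_source_on_error():
#         g["f"]()   # the printed traceback now points at a temp .py file containing src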
@contextlib.contextmanager
def report_compile_source_on_error():
try:
yield
except Exception as exc:
tb = exc.__traceback__
# Walk the traceback, looking for frames that have
# source attached
stack = []
while tb is not None:
filename = tb.tb_frame.f_code.co_filename
source = tb.tb_frame.f_globals.get("__compile_source__")
if filename == "<string>" and source is not None:
# What black magic are we doing here? Intuitively, what
# we would like to do is overwrite the co_filename on any
# frames that were generated from exec/eval so that they
# point to a temporary file that has the actual line
# information, so Python's default error printer can print
# useful line information on it.
#
# Writing out the temporary file is easy. But overwriting
# co_filename is not! You can't modify the code object
# associated with a frame. You can, however, reconstruct
# a traceback with entirely new frames from scratch, so that's
# what we do. But there's another problem, which is how to
# make the frame?
#
# The black magic is we make a frankenstein frame and code
# object which resembles the original frame/code enough so
# that it will print properly under traceback and the default
# error printer, but IT IS NOT THE ORIGINAL FRAME (you
# couldn't, e.g., execute its code with different variables
# and expect it to work.)
# Don't delete the temporary file so the user can inspect it
# TODO: This creates a temporary file for every frame, but we
# technically only need one per distinct __compile_source__
with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix=".py") as f:
f.write(source)
# Create a frame. Python doesn't let you construct
# FrameType directly, so just make one with compile
frame = tb.tb_frame
code = compile('__inspect_currentframe()', f.name, 'eval')
code = code.replace(co_name=frame.f_code.co_name)
# Python 3.11 only
if hasattr(frame.f_code, 'co_linetable'):
# We can't copy ALL of the metadata over, because you
# can cause Python to segfault this way. What exactly
# do we need? We need enough information for
# traceback to be able to print the exception
# correctly. Code reading Lib/traceback.py reveals
# that traceback calls code.co_positions() in order to
# get the augmented line/col numbers. Objects/codeobject.c,
# specifically _PyCode_InitAddressRange, reveals that
# this iterator is initialized from co_linetable and
                    # co_firstlineno. So copy these we must!
code = code.replace( # type: ignore[call-arg]
co_linetable=frame.f_code.co_linetable, # type: ignore[attr-defined]
co_firstlineno=frame.f_code.co_firstlineno, # type: ignore[attr-defined]
)
fake_frame = eval(
code,
frame.f_globals,
{
**frame.f_locals,
'__inspect_currentframe': inspect.currentframe
}
)
fake_tb = TracebackType(
None, fake_frame, tb.tb_lasti, tb.tb_lineno
)
stack.append(fake_tb)
else:
stack.append(tb)
tb = tb.tb_next
# Reconstruct the linked list
tb_next = None
for tb in reversed(stack):
tb.tb_next = tb_next
tb_next = tb
raise exc.with_traceback(tb_next) # noqa: B904
def shorten_filename(fn, *, base=None):
"""Shorten a source filepath, with the assumption that torch/ subdirectories don't need to be shown to user."""
if base is None:
base = os.path.dirname(os.path.dirname(__file__))
# Truncate torch/foo.py to foo.py
try:
prefix = os.path.commonpath([fn, base])
except ValueError:
return fn
else:
return fn[len(prefix) + 1:]
def format_frame(frame, *, base=None, line=False):
"""
Format a FrameSummary in a short way, without printing full absolute path or code.
The idea is the result fits on a single line.
"""
extra_line = ""
if line:
extra_line = f"{frame.line} # "
return f"{extra_line}{shorten_filename(frame.filename, base=base)}:{frame.lineno} in {frame.name}"
def format_traceback_short(tb):
"""Format a TracebackType in a short way, printing only the inner-most frame."""
return format_frame(traceback.extract_tb(tb)[-1])
class CapturedTraceback:
__slots__ = ['tb', 'skip']
def __init__(self, tb, skip=0):
self.tb = tb
self.skip = skip
def cleanup(self):
self.tb = None
def summary(self):
import torch._C._profiler
if self.tb is None:
# TODO: Maybe indicate that the traceback was elided?
return traceback.StackSummary()
return _extract_symbolized_tb(
torch._C._profiler.symbolize_tracebacks([self.tb])[0],
self.skip
)
def __getstate__(self):
return (None, {
'tb': None, # TB is not pickleable
'skip': self.skip,
})
@staticmethod
def extract(*, script=False, cpp=False, skip=0):
"""
Like traceback.extract_stack(), but faster (approximately 20x faster); it
is fast enough that you can unconditionally log stacks this way as part of
        normal execution. It returns a CapturedTraceback object that must be
        formatted specially, e.g. with CapturedTraceback.format or format_all.
By default, this only reports Python backtraces (like extract_stack). You
can set the script/cpp kwargs to also turn on TorchScript/C++ trace
reporting.
"""
import torch._C._profiler
if script or cpp:
assert skip == 0, "skip with script/cpp NYI"
return CapturedTraceback(
torch._C._profiler.gather_traceback(python=True, script=script, cpp=cpp),
# Elide extract() frame if we don't have script/cpp frames. If
# we do have those frames, it doesn't work so force zero.
0 if script or cpp else skip + 1
)
def format(self):
"""
        Formats this CapturedTraceback into a list of strings equivalent to the
        output of traceback.format_list. Note that for a CapturedTraceback with
        C++ traces, it is better not to use this function; use the batch
        formatting API CapturedTraceback.format_all to amortize the cost of
        symbolization.
"""
return traceback.format_list(self.summary())
@staticmethod
def format_all(tbs):
"""
Bulk version of CapturedTraceback.format. Returns a list of list of strings.
"""
import torch._C._profiler
# Directly populate tracebacks that already have cached summaries
rs: list[Optional[list[str]]] = []
delayed_idxs = []
for i, tb in enumerate(tbs):
if tb.tb is None:
rs.append([])
else:
rs.append(None)
delayed_idxs.append(i)
torch._C._profiler.symbolize_tracebacks([tbs[i].tb for i in delayed_idxs])
for i in delayed_idxs:
rs[i] = traceback.format_list(tbs[i].summary())
return rs
def _extract_symbolized_tb(tb, skip):
"""
Given a symbolized traceback from symbolize_tracebacks, return a StackSummary object of
pre-processed stack trace entries.
"""
stack = traceback.StackSummary()
for f in reversed(tb[skip:]):
stack.append(traceback.FrameSummary(f['filename'], f['line'], f['name']))
return stack
```
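A short sketch of the `CapturedTraceback` helper above (illustrative):

```py
from torch.utils._traceback import CapturedTraceback

def leaf():
    # Capture the current Python stack cheaply; symbolization is deferred until format().
    return CapturedTraceback.extract(skip=1)

tb = leaf()
print("".join(tb.format()))  # similar in shape to traceback.format_stack() output
```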
|
==============================================================================================================
SOURCE CODE FILE: _triton.py
LINES: 1
SIZE: 3.68 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_triton.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import functools
import hashlib
@functools.lru_cache(None)
def has_triton_package() -> bool:
try:
from triton.compiler.compiler import triton_key
return triton_key is not None
except ImportError:
return False
except RuntimeError:
return False
@functools.lru_cache(None)
def has_triton_tma():
if has_triton_package():
import torch
if (
torch.cuda.is_available()
and torch.cuda.get_device_capability() >= (9, 0)
and not torch.version.hip
):
try:
from triton.tools.experimental_descriptor import ( # noqa: F401
create_1d_tma_descriptor,
create_2d_tma_descriptor,
)
return True
except ImportError:
pass
return False
@functools.lru_cache(None)
def has_triton_tma_device():
if has_triton_package():
import torch
if (
torch.cuda.is_available()
and torch.cuda.get_device_capability() >= (9, 0)
and not torch.version.hip
):
try:
from triton.language.extra.cuda import ( # noqa: F401
experimental_device_tensormap_create1d,
experimental_device_tensormap_create2d,
)
return True
except ImportError:
pass
return False
@functools.lru_cache(None)
def has_triton() -> bool:
if not has_triton_package():
return False
from torch._dynamo.device_interface import get_interface_for_device
def cuda_extra_check(device_interface):
return device_interface.Worker.get_device_properties().major >= 7
def cpu_extra_check(device_interface):
import triton.backends
return "cpu" in triton.backends.backends
def _return_true(device_interface):
return True
triton_supported_devices = {
"cuda": cuda_extra_check,
"xpu": _return_true,
"cpu": cpu_extra_check,
}
def is_device_compatible_with_triton():
for device, extra_check in triton_supported_devices.items():
device_interface = get_interface_for_device(device)
if device_interface.is_available() and extra_check(device_interface):
return True
return False
return is_device_compatible_with_triton()
@functools.lru_cache(None)
def triton_backend():
from triton.compiler.compiler import make_backend
from triton.runtime.driver import driver
target = driver.active.get_current_target()
return make_backend(target)
@functools.lru_cache(None)
def triton_hash_with_backend():
from triton.compiler.compiler import triton_key
backend = triton_backend()
key = f"{triton_key()}-{backend.hash()}"
# Hash is upper case so that it can't contain any Python keywords.
return hashlib.sha256(key.encode("utf-8")).hexdigest().upper()
def dtype_to_string(dtype):
if dtype.name.startswith("fp"):
suffix = "float" + dtype.name[2:]
elif dtype.name.startswith("bf"):
suffix = "bfloat" + dtype.name[2:]
else:
suffix = dtype.name
return "triton.language." + suffix
def patch_triton_dtype_repr():
import triton
# Hack to get triton dtype repr to produce an evaluatable expression
# triton.language.float32 emits triton.language.fp32 which does not
# exist
# REMOVE when https://github.com/openai/triton/pull/3342 lands
triton.language.dtype.__repr__ = lambda self: dtype_to_string(self)
```
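A small sketch of how the helpers above are typically consumed (illustrative; `triton_hash_with_backend` requires a working Triton install):

```py
from torch.utils._triton import has_triton, has_triton_package, triton_hash_with_backend

if has_triton():
    # A Triton-compatible device is available; the hash can key compilation caches.
    print("cache key component:", triton_hash_with_backend())
elif has_triton_package():
    print("triton is installed, but no compatible device is available")
else:
    print("triton is not installed")
```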
|