diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/__init__.py b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/autograd.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/autograd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71a234e291d65e594e6cde27d6f242fd59366330 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/autograd.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/cpp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/cpp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4bf284868617aa9efb76a05525517cce1118ed7c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/cpp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/dispatcher.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/dispatcher.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5f0e8c07ddd30267057566c99df6eb08a254a56 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/dispatcher.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/lazy.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/lazy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7c9788869b7b2f65d3f61adbc83ff8f9ff2f316 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/lazy.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/meta.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/meta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6458e263f0c7e5bc2fe1e97dc97228ff1b98a93c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/meta.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/python.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/python.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9e3f4e6457e58e389bfbd905f704a88e31b90ef Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/python.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/structured.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/structured.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea3691feeadbb608fe9c40b18bb6bb0b7c6aa29e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/structured.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/translate.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/translate.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a7e634830d62f79c595874ee96eec83189998fa2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/translate.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/ufunc.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/ufunc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..484c0c0e4dd367e3560f10127ef443ae305baa67 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/ufunc.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/unboxing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/unboxing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a7c4cea4b1b115fff52b47d05beb8cc2b4fee3b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/unboxing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/cpp.py b/env-llmeval/lib/python3.10/site-packages/torchgen/api/cpp.py new file mode 100644 index 0000000000000000000000000000000000000000..f5466030daa6baba3899373d7af220bdfa102b72 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/api/cpp.py @@ -0,0 +1,467 @@ +from typing import List, Optional, Sequence, Set, Union + +from torchgen import local +from torchgen.api.types import ( + ArgName, + ArrayCType, + ArrayRefCType, + BaseCType, + BaseTypeToCppMapping, + Binding, + boolT, + ConstRefCType, + CType, + dimnameListT, + intArrayRefT, + iTensorListRefT, + ListCType, + longT, + MutRefCType, + NamedCType, + OptionalCType, + optionalIntArrayRefT, + optionalSymIntArrayRefT, + scalarT, + SpecialArgName, + symIntArrayRefT, + SymIntT, + tensorListT, + tensorOptionsT, + tensorT, + TupleCType, + VectorCType, + voidT, +) +from torchgen.model import ( + Argument, + Arguments, + BaseTy, + BaseType, + FunctionSchema, + ListType, + NativeFunction, + OptionalType, + Return, + SelfArgument, + TensorOptionsArguments, + Type, +) +from torchgen.utils import assert_never + +# This file describes the translation of JIT schema to the public C++ +# API, which is what people use when they call functions like at::add. +# +# Prominent characteristics of the C++ API: +# +# - dtype, layout, device and pin_memory are collected into +# a single C++ type TensorOptions (the native functions API +# also has this, but tensor options is really most relevant +# for the C++ API; it makes calling kwarg factory functions +# pleasant) +# +# - defaulting lives here (in fact, the dispatcher is completely +# oblivious of defaults!) +# +# BTW: policy on name collisions: we try not to have types with +# collisions, but functions are fair game to collide + + +def name( + func: FunctionSchema, + *, + faithful_name_for_out_overloads: bool = False, + symint_overload: bool = False, +) -> str: + name = str(func.name.name) + if symint_overload: + name += "_symint" + if func.is_out_fn(): + if faithful_name_for_out_overloads: + name += "_outf" + else: + name += "_out" + + return name + + +# Translation of "value types" in JIT schema to C++ API type. Value +# types look the same no matter if they are argument types or return +# types. Returns None if the type in question is not a value type. 
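+#
+# An informal sketch of the mapping implemented below (not exhaustive):
+#   int              -> int64_t
+#   bool             -> bool
+#   SymInt           -> c10::SymInt when symint=True, otherwise int64_t
+#   bool[3]          -> ::std::array<bool,3>
+#   Tensor / Scalar  -> None (not value types; handled by argumenttype_type)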
+def valuetype_type( + t: Type, + *, + binds: ArgName, + remove_non_owning_ref_types: bool = False, + symint: bool = False, +) -> Optional[NamedCType]: + if isinstance(t, BaseType): + if t.name == BaseTy.Tensor or t.name == BaseTy.Scalar: + return None + elif str(t) == "SymInt": + if symint: + return NamedCType(binds, BaseCType(SymIntT)) + else: + return NamedCType(binds, BaseCType(longT)) + if remove_non_owning_ref_types: + if t.name == BaseTy.str: + raise AssertionError( + "string ref->value conversion: not implemented yet" + ) + # All other BaseType currently map directly to BaseCppTypes. + return NamedCType(binds, BaseCType(BaseTypeToCppMapping[t.name])) + elif isinstance(t, OptionalType): + elem = valuetype_type(t.elem, binds=binds, symint=symint) + if elem is None: + return None + return NamedCType(binds, OptionalCType(elem.type)) + elif isinstance(t, ListType): + if str(t.elem) == "bool": + assert t.size is not None + return NamedCType(binds, ArrayCType(BaseCType(boolT), t.size)) + else: + return None + else: + raise AssertionError(f"unrecognized type {repr(t)}") + + +# Translation of types occurring in JIT arguments to a C++ argument type. +# If remove_non_owning_ref_types is set, we'll guarantee that the outputed CType is not a non-owning reference type. +# For example, we'll return std::vector instead of IntArrayRef. +# See Note [translation from C++ reference to value types] +def argumenttype_type( + t: Type, + *, + mutable: bool, + binds: ArgName, + remove_non_owning_ref_types: bool = False, + symint: bool = False, +) -> NamedCType: + # If it's a value type, do the value type translation + r = valuetype_type( + t, + binds=binds, + symint=symint, + remove_non_owning_ref_types=remove_non_owning_ref_types, + ) + if r is not None: + return r + + if isinstance(t, BaseType): + if t.name == BaseTy.Tensor: + if mutable and not local.use_const_ref_for_mutable_tensors(): + return NamedCType(binds, MutRefCType(BaseCType(tensorT))) + else: + return NamedCType(binds, ConstRefCType(BaseCType(tensorT))) + elif t.name == BaseTy.Scalar: + return NamedCType(binds, ConstRefCType(BaseCType(scalarT))) + else: + raise AssertionError(f"base type should have been value type {t}") + elif isinstance(t, OptionalType): + if str(t.elem) == "Tensor": + if mutable and not local.use_const_ref_for_mutable_tensors(): + return NamedCType( + binds, MutRefCType(BaseCType(tensorT)) + ) # TODO: fix this discrepancy + else: + return NamedCType( + binds, ConstRefCType(OptionalCType(BaseCType(tensorT))) + ) + elif str(t.elem) == "Scalar": + return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT)))) + elif isinstance(t.elem, ListType) and str(t.elem.elem) == "int": + return NamedCType(binds, BaseCType(optionalIntArrayRefT)) + elif isinstance(t.elem, ListType) and str(t.elem.elem) == "SymInt": + if symint: + return NamedCType(binds, BaseCType(optionalSymIntArrayRefT)) + else: + return NamedCType(binds, BaseCType(optionalIntArrayRefT)) + elem = argumenttype_type(t.elem, mutable=mutable, binds=binds, symint=symint) + return NamedCType(binds, OptionalCType(elem.type)) + elif isinstance(t, ListType): + # TODO: remove these special cases, ArrayRef fallthrough works fine + if str(t.elem) == "int": + if remove_non_owning_ref_types: + return NamedCType(binds, VectorCType(BaseCType(longT))) + else: + return NamedCType(binds, BaseCType(intArrayRefT)) + if str(t.elem) == "SymInt": + if remove_non_owning_ref_types: + if symint: + return NamedCType(binds, VectorCType(BaseCType(SymIntT))) + else: + return 
NamedCType(binds, VectorCType(BaseCType(longT))) + else: + if symint: + return NamedCType(binds, BaseCType(symIntArrayRefT)) + else: + return NamedCType(binds, BaseCType(intArrayRefT)) + if str(t.elem) == "Tensor": + if local.use_ilistref_for_tensor_lists(): + return NamedCType(binds, ConstRefCType(BaseCType(iTensorListRefT))) + else: + return NamedCType(binds, BaseCType(tensorListT)) + elif str(t.elem) == "Scalar": + return NamedCType(binds, ArrayRefCType(BaseCType(scalarT))) + elif str(t.elem) == "Dimname": + return NamedCType(binds, BaseCType(dimnameListT)) + elif str(t.elem) == "Tensor?": + return NamedCType( + binds, ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT)))) + ) + elem = argumenttype_type(t.elem, mutable=mutable, binds=binds, symint=symint) + return NamedCType(binds, ArrayRefCType(elem.type)) + else: + raise AssertionError(f"unrecognized type {repr(t)}") + + +# Translate a JIT argument into its C++ type +def argument_type(a: Argument, *, binds: ArgName, symint: bool = False) -> NamedCType: + return argumenttype_type(a.type, mutable=a.is_write, symint=symint, binds=binds) + + +# Translation of a (non-multi) return type from JIT to C++ +# N.B: returntype_type returns a CType, not a NamedCType. +# This is mostly because of the mismatch between return types and return names. +# e.g. a function with a return type of 'void' has 0 return names, +# and a function with a return type of 'std::tuple' has >1 return name. +def returntype_type(t: Type, *, mutable: bool, symint: bool = False) -> CType: + # placeholder is ignored + # NB: symint is ALWAYS respected for return types. So symint argument + # here is IGNORED + r = valuetype_type(t, binds="__placeholder__", symint=True) + if r is not None: + return r.type + + if isinstance(t, BaseType): + if t.name == BaseTy.Tensor: + if mutable: + if local.use_const_ref_for_mutable_tensors(): + return ConstRefCType(BaseCType(tensorT)) + else: + return MutRefCType(BaseCType(tensorT)) + else: + # Note [Tensor Copy Returns] + # Currently, we use "Argument.is_write" to determine + # whether or not Tensor return types should be copies or references. + # If that ever changes, take a look at other locations of this note! + return BaseCType(tensorT) + elif t.name == BaseTy.Scalar: + return BaseCType(scalarT) + elif isinstance(t, ListType): + assert ( + not mutable + ), "Native functions should never return a mutable tensor list. They should return void." 
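+        # e.g. a 'Tensor[]' return is rendered as the owning ::std::vector<at::Tensor>
+        # (a VectorCType over the element's return type), never a non-owning list ref.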
+ elem = returntype_type(t.elem, mutable=False) + assert t.size is None, f"fixed size list returns not supported: {t}" + return VectorCType(elem) + elif isinstance(t, OptionalType): + elem = returntype_type(t.elem, mutable=mutable) + if str(t.elem) == "Tensor": + return OptionalCType(elem) + + raise AssertionError(f"unrecognized return type {t}") + + +# Translation of a single return to its C++ type +def return_type(r: Return, *, symint: bool = False) -> CType: + return returntype_type(r.type, mutable=r.is_write, symint=symint) + + +# Translation of a full (possibly multi) return from JIT to its C++ type +def returns_type(rs: Sequence[Return], *, symint: bool = False) -> CType: + if len(rs) == 0: + return BaseCType(voidT) + elif len(rs) == 1: + return return_type(rs[0], symint=symint) + else: + return TupleCType([return_type(r, symint=symint) for r in rs]) + + +def return_names(f: NativeFunction, *, fallback_name: str = "result") -> Sequence[str]: + returns: List[str] = [] + for i, r in enumerate(f.func.returns): + # If we have an inplace function, the return argument is + # implicitly named self. + # TODO: Consider incorporating this into the data model + if f.func.name.name.inplace: + assert i == 0, "illegal inplace function with multiple returns" + name = "self" + # If we are out function, the name is the name of the + # corresponding output function (r.name will get recorded + # in field_name later.) + elif f.func.is_out_fn(): + name = f.func.arguments.out[i].name + # If the return argument is explicitly named... + elif r.name: + name_conflict = any( + r.name == a.name for a in f.func.schema_order_arguments() + ) + if name_conflict and not f.func.is_out_fn(): + name = f"{r.name}_return" + else: + name = r.name + # If there is no explicit name and no fallback name was passed in, we just name the output result, + # unless it's a multi-return, in which case it's result0, + # result1, etc (zero-indexed) + else: + name = fallback_name if len(f.func.returns) == 1 else f"{fallback_name}{i}" + returns.append(name) + return returns + + +JIT_TO_CPP_DEFAULT = { + "False": "false", + "True": "true", + "None": "c10::nullopt", # UGH this one is type directed + "Mean": "at::Reduction::Mean", + "[]": "{}", + "contiguous_format": "MemoryFormat::Contiguous", + "long": "at::kLong", +} + + +# Convert a JIT default into C++ expression representing the default +def default_expr(d: str, t: Type, *, symint: bool) -> str: + if d == "None" and str(t) == "Tensor?": + return "{}" + if isinstance(t, BaseType) and t.name is BaseTy.str: + # Schema allows single quotes but C++ needs double + if len(d) >= 2 and d[0] == "'" and d[-1] == "'": + s = "" + i = 1 + while i + 1 < len(d): + if d[i] != "\\": + if d[i] == '"': + s += '\\"' + else: + s += d[i] + i += 1 + else: + if d[i + 1] == "'": + s += "'" + else: + s += d[i : i + 2] + i += 2 + + return f'"{s}"' + + if isinstance(t, OptionalType): + if d == "None": + return "c10::nullopt" + + return default_expr(d, t.elem, symint=symint) + + if isinstance(t, ListType): + if d.startswith("[") and d.endswith("]"): + return "{" + d[1:-1] + "}" + elif symint and d.isdigit() and str(t.elem) == "SymInt": + return f"c10::SymInt({d})" + elif t.size is None: + # NOTE: Sized lists can have scalar defaults + raise ValueError(f"Expected a list default '[...]' but found: '{d}'") + + return JIT_TO_CPP_DEFAULT.get(d, d) + + +# Convert an argument into its C++ API form + + +def argument( + a: Union[Argument, TensorOptionsArguments, SelfArgument], + *, + cpp_no_default_args: Set[str], + 
method: bool, + faithful: bool, + symint: bool = False, + has_tensor_options: bool, +) -> List[Binding]: + def sub_argument( + a: Union[Argument, TensorOptionsArguments, SelfArgument] + ) -> List[Binding]: + return argument( + a, + cpp_no_default_args=cpp_no_default_args, + method=method, + faithful=faithful, + symint=symint, + has_tensor_options=has_tensor_options, + ) + + if isinstance(a, Argument): + binds: ArgName + if a.name == "memory_format" and has_tensor_options: + binds = SpecialArgName.possibly_redundant_memory_format + else: + binds = a.name + default: Optional[str] = None + if a.name not in cpp_no_default_args and a.default is not None: + default = default_expr(a.default, a.type, symint=symint) + return [ + Binding( + nctype=argument_type(a, binds=binds, symint=symint), + name=a.name, + default=default, + argument=a, + ) + ] + elif isinstance(a, TensorOptionsArguments): + if faithful: + return ( + sub_argument(a.dtype) + + sub_argument(a.layout) + + sub_argument(a.device) + + sub_argument(a.pin_memory) + ) + else: + default = None + # Enforced by NativeFunction.__post_init__ + assert "options" not in cpp_no_default_args + if all(x.default == "None" for x in a.all()): + default = "{}" + elif a.dtype.default == "long": + default = "at::kLong" # TODO: this is wrong + return [ + Binding( + nctype=NamedCType("options", BaseCType(tensorOptionsT)), + name="options", + default=default, + argument=a, + ) + ] + elif isinstance(a, SelfArgument): + if method: + # Caller is responsible for installing implicit this in context! + return [] + else: + return sub_argument(a.argument) + else: + assert_never(a) + + +def arguments( + arguments: Arguments, + *, + faithful: bool, + symint: bool = False, + method: bool, + cpp_no_default_args: Set[str], +) -> List[Binding]: + args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = [] + if faithful: + args.extend(arguments.non_out) + args.extend(arguments.out) + else: + args.extend(arguments.out) + args.extend(arguments.non_out) + return [ + r.no_default() if faithful else r + for a in args + for r in argument( + a, + faithful=faithful, + symint=symint, + method=method, + has_tensor_options=arguments.tensor_options is not None, + cpp_no_default_args=cpp_no_default_args, + ) + ] diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/dispatcher.py b/env-llmeval/lib/python3.10/site-packages/torchgen/api/dispatcher.py new file mode 100644 index 0000000000000000000000000000000000000000..58816959f7cd2276274d71436c46d2c36315c631 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/api/dispatcher.py @@ -0,0 +1,118 @@ +import itertools +from typing import List, Sequence, Union + +from torchgen.api import cpp + +from torchgen.api.types import ArgName, Binding, CType, NamedCType +from torchgen.model import ( + Argument, + FunctionSchema, + Return, + SelfArgument, + TensorOptionsArguments, + Type, +) +from torchgen.utils import assert_never, concatMap + +# This file describes the translation of JIT schema to the dispatcher +# API, the *unboxed* calling convention by which invocations through +# the dispatcher are made. Historically, the dispatcher API matched +# the C++ API, but with the establishment of the boxed API, we've +# made changes to the dispatcher API to so that the unboxed API +# better aligns with the boxed API. The dispatcher API hooks heavily +# into our template based boxing/unboxing machinery, so changes +# to this convention will usually need template updates too. 
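+#
+# As a rough illustration (schema abbreviated here), for
+#   aten::empty(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None,
+#               Device? device=None, bool? pin_memory=None) -> Tensor
+# the dispatcher signature looks approximately like
+#   Tensor empty(c10::SymIntArrayRef size, c10::optional<ScalarType> dtype,
+#                c10::optional<Layout> layout, c10::optional<Device> device,
+#                c10::optional<bool> pin_memory);
+# with no TensorOptions packing and no C++ defaults.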
+# +# Prominent characteristics of the dispatcher API: +# +# - dtype, layout, device and pin_memory are represented as separate +# arguments. +# + + +def name(func: FunctionSchema) -> str: + return cpp.name(func) + + +def argumenttype_type( + t: Type, + *, + mutable: bool, + binds: ArgName, + remove_non_owning_ref_types: bool = False, + symint: bool = True, +) -> NamedCType: + # This is a faux amis. If it makes sense in the future to add + # more special cases here, or invert things so cpp.argument_type + # calls this, or just completely inline the function, please do + # it. + return cpp.argumenttype_type( + t, + mutable=mutable, + binds=binds, + symint=symint, + remove_non_owning_ref_types=remove_non_owning_ref_types, + ) + + +def argument_type( + a: Argument, + *, + binds: ArgName, + remove_non_owning_ref_types: bool = False, + symint: bool = True, +) -> NamedCType: + return argumenttype_type( + a.type, + mutable=a.is_write, + binds=binds, + remove_non_owning_ref_types=remove_non_owning_ref_types, + symint=symint, + ) + + +def returns_type(rs: Sequence[Return], *, symint: bool = True) -> CType: + # At present, there is no difference. But there could be! + return cpp.returns_type(rs, symint=symint) + + +def jit_arguments(func: FunctionSchema) -> List[Argument]: + def to_argument( + a: Union[Argument, TensorOptionsArguments, SelfArgument] + ) -> List[Argument]: + if isinstance(a, Argument): + return [a] + elif isinstance(a, SelfArgument): + return [a.argument] + elif isinstance(a, TensorOptionsArguments): + return [a.dtype, a.layout, a.device, a.pin_memory] + else: + assert_never(a) + + return list( + concatMap( + to_argument, + itertools.chain( + func.arguments.positional, func.arguments.kwarg_only, func.arguments.out + ), + ) + ) + + +def argument( + a: Argument, *, remove_non_owning_ref_types: bool = False, symint: bool = True +) -> Binding: + return Binding( + nctype=argument_type( + a, + binds=a.name, + remove_non_owning_ref_types=remove_non_owning_ref_types, + symint=symint, + ), + name=a.name, + argument=a, + ) + + +def arguments(func: FunctionSchema, *, symint: bool = True) -> List[Binding]: + return [argument(a, symint=symint) for a in jit_arguments(func)] diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/functionalization.py b/env-llmeval/lib/python3.10/site-packages/torchgen/api/functionalization.py new file mode 100644 index 0000000000000000000000000000000000000000..0b86dd547f7d474c74e8b6a74c9c220eac3e6d36 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/api/functionalization.py @@ -0,0 +1,176 @@ +from typing import List, Optional + +from torchgen.api import dispatcher +from torchgen.api.types import ( + BaseCType, + Binding, + boolT, + ConstRefCType, + CType, + longT, + NamedCType, + tensorT, +) +from torchgen.model import ( + Argument, + BaseTy, + BaseType, + FunctionSchema, + NativeFunctionsViewGroup, +) + + +# This file describes the translation of JIT schema to API's used +# when creating view lambdas that are used by the functionalization pass. +# There are two types of lambdas: forward lambdas and reverse lambdas. 
+# These API's mostly follow the dispatcher API, with a few quirks: +# - The lambda capture has to convert reference types to value types +# - While the forward lambda just directly calls into the at::_ops API +# (following the dispatcher convention), the logic here for the reverse lambda +# is responsible for generating both the call-site, and the declarations +# (which are implemented manually in the at::functionalization::impl namespace). + +# The lambdas generated for each view op in the functionalization pass are of the form +# [capture_arguments](outer_arguments) -> returns_type { +# return name(inner_arguments); +# } + +# Define some specific lambda input arguments. +base_binding = Binding( + name="base", + nctype=NamedCType(name="base", type=ConstRefCType(BaseCType(tensorT))), + argument=Argument( + name="base", type=BaseType(BaseTy.Tensor), default=None, annotation=None + ), + default=None, +) +mutated_view_binding = Binding( + name="mutated_view", + nctype=NamedCType(name="mutated_view", type=ConstRefCType(BaseCType(tensorT))), + argument=Argument( + name="base", type=BaseType(BaseTy.Tensor), default=None, annotation=None + ), + default=None, +) +mutated_view_idx_binding = Binding( + name="mutated_view_idx", + nctype=NamedCType(name="mutated_view_idx", type=BaseCType(longT)), + argument=Argument( + name="base", type=BaseType(BaseTy.Tensor), default=None, annotation=None + ), + default=None, +) +reapply_views_binding = Binding( + name="reapply_views", + nctype=NamedCType(name="reapply_views", type=BaseCType(boolT)), + argument=Argument( + name="reapply_views", type=BaseType(BaseTy.bool), default=None, annotation=None + ), + default=None, +) + + +# The lambda capture itself doesn't have a name. +# The name returned here corresponds to the name of the inner function called by the lambda. +def name( + g: NativeFunctionsViewGroup, + *, + is_reverse: bool, + include_namespace: bool, + reapply_views: Optional[bool] = None, +) -> str: + if reapply_views is None: + # reapply_views is only important for the fwd lambda, + # since we always plumb the runtime "reapply_views" argument into the reverse function. + assert is_reverse + if is_reverse: + # for the reverse: the name of the inverse function always involves "view_copy", + # and we plumb the "reapply_views" flag into that function. + # (We could avoid doing that, but that would require writing out twice as many view inverse functions). + assert g.view_copy is not None + api_name = g.view_copy.func.name.unambiguous_name() + # in the reverse case, we codegen both the call-sites (which need the full namespace) and the declarations (which don't) + if include_namespace: + return f"at::functionalization::FunctionalInverses::{api_name}_inverse" + else: + return f"{api_name}_inverse" + # in the forward case, we just directly call into the at::_ops API (so we always need the namespace) + assert include_namespace + assert g.view_copy is not None + api_name = ( + g.view.func.name.unambiguous_name() + if reapply_views + else g.view_copy.func.name.unambiguous_name() + ) + return f"at::_ops::{api_name}::call" + + +def capture_arguments(func: FunctionSchema, *, is_reverse: bool) -> List[Binding]: + # capture arguments include all arguments except `self`. 
+ # Importantly, they don't include any C++ reference types (or else we'll get a dangling reference in the capture), + # So any reference types (IntArrayRef) need to be converted to value types (vector) + args = func.arguments.flat_all + assert args[0].type == BaseType(BaseTy.Tensor) + non_self_args = args[1:] + non_self_value_bindings = [ + dispatcher.argument(a, remove_non_owning_ref_types=True) for a in non_self_args + ] + all_bindings = [reapply_views_binding] + non_self_value_bindings + return all_bindings + + +def returns_type(func: FunctionSchema) -> CType: + # Assertion: all view ops return tensor-like outputs + assert len(func.returns) >= 1 + for ret in func.returns: + assert ret.type.is_tensor_like() + # However, the return type of the lambda is always an individual tensor. + # For multi-tensor outputs, each tensor needs to be tracked individually. + return BaseCType(tensorT) + + +def outer_arguments(*, is_reverse: bool) -> List[Binding]: + if is_reverse: + return [base_binding, mutated_view_binding, mutated_view_idx_binding] + else: + return [base_binding, mutated_view_idx_binding] + + +def inner_call_index(func: FunctionSchema) -> Optional[Binding]: + # For view ops that return multiple tensors (like `split`), we generate a separate lambda for each output. + # When we replay a view op that returns multiple tensors, we need to index into the output appropriately + if len(func.returns) > 1 or ( + len(func.returns) == 1 and func.returns[0].type.is_list_like() + ): + return mutated_view_idx_binding + return None + + +def inner_arguments(func: FunctionSchema, is_reverse: bool) -> List[Binding]: + args = func.arguments.flat_all + assert args[0].type == BaseType(BaseTy.Tensor) + non_self_args = args[1:] + # The forward lambda calls the at::_ops API, while the reverse lambda calls the view inverse API. + # Both of these follow the dispatcher API. + non_self_bindings = [dispatcher.argument(a) for a in non_self_args] + if not is_reverse: + # the forward lambda swaps out the original tensor argument with the lambd arg "base" + return [base_binding] + non_self_bindings + else: + # the reverse lambda does the same, but with an additional "mutated_view" arg + # additionally, we have a calling convention: for view ops that return multiple tensor outputs + # their corresponding view_inverse function takes in an additional index argument. + index_binding = inner_call_index(func) + if index_binding is not None: + return [ + base_binding, + mutated_view_binding, + reapply_views_binding, + index_binding, + ] + non_self_bindings + else: + return [ + base_binding, + mutated_view_binding, + reapply_views_binding, + ] + non_self_bindings diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/meta.py b/env-llmeval/lib/python3.10/site-packages/torchgen/api/meta.py new file mode 100644 index 0000000000000000000000000000000000000000..ad488d303d46329ba198d7f077b617704655b3b6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/api/meta.py @@ -0,0 +1,12 @@ +from torchgen.model import NativeFunctionsGroup + +# Follows dispatcher calling convention, but: +# - Mutable arguments not allowed. Meta functions are always +# written in functional form. 
Look at FunctionSchema.signature() +# - No tensor returns; instead we return a TensorMeta describing +# the tensor in question + + +def name(g: NativeFunctionsGroup) -> str: + # use the overload name from the functional version + return str(g.functional.func.name).replace(".", "_") diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/native.py b/env-llmeval/lib/python3.10/site-packages/torchgen/api/native.py new file mode 100644 index 0000000000000000000000000000000000000000..7f8b3eb3af2e7e90ade39afb0f3c559951b69b99 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/api/native.py @@ -0,0 +1,153 @@ +from typing import List, Optional, Sequence, Union + +from torchgen import local +from torchgen.api import cpp + +from torchgen.api.types import ( + ArgName, + BaseCType, + Binding, + boolT, + ConstRefCType, + CType, + deviceT, + layoutT, + ListCType, + MutRefCType, + NamedCType, + OptionalCType, + scalarT, + scalarTypeT, + tensorT, +) +from torchgen.model import ( + Argument, + FunctionSchema, + Return, + SelfArgument, + TensorOptionsArguments, + Type, +) +from torchgen.utils import assert_never + +# This file describes the translation of JIT schema to the native functions API. +# This looks a lot like the C++ API (which makes historical sense, because the +# idea was you wrote native functions to implement functions in the C++ API), +# but over time we have evolved the C++ API without actually changing our +# native:: kernels. The intention is to make native API and dispatcher API +# line up as closely as possible, since this results in the least overhead +# (no translation is needed from dispatcher API to native API). +# +# NB: this is symint aware, you will get the non-SymInt variant for some +# dispatch entries and SymInt for others. + + +def name(func: FunctionSchema) -> str: + name = str(func.name.name) + # TODO: delete this! + if func.is_out_fn(): + name += "_out" + if func.name.overload_name: + name += f"_{func.name.overload_name}" + return name + + +def argumenttype_type( + t: Type, *, mutable: bool, binds: ArgName, symint: bool +) -> NamedCType: + if str(t) == "Tensor?": + tensor_type: OptionalCType = OptionalCType(BaseCType(tensorT)) + if mutable and not local.use_const_ref_for_mutable_tensors(): + return NamedCType(binds, MutRefCType(tensor_type)) + else: + return NamedCType(binds, ConstRefCType(tensor_type)) + elif str(t) == "Tensor?[]": + return NamedCType( + binds, ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT)))) + ) + elif str(t) == "Scalar": + return NamedCType(binds, ConstRefCType(BaseCType(scalarT))) + elif str(t) == "Scalar?": + return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT)))) + return cpp.argumenttype_type(t, mutable=mutable, binds=binds, symint=symint) + + +def returns_type(rs: Sequence[Return], *, symint: bool) -> CType: + return cpp.returns_type(rs, symint=symint) + + +def argument_type(a: Argument, *, binds: ArgName, symint: bool) -> NamedCType: + return argumenttype_type(a.type, mutable=a.is_write, binds=binds, symint=symint) + + +def argument( + a: Union[Argument, SelfArgument, TensorOptionsArguments], + *, + is_out: bool, + symint: bool, +) -> List[Binding]: + # Ideally, we NEVER default native functions. However, there are a number + # of functions that call native:: directly and rely on the defaulting + # existing. 
So for BC, we generate defaults for non-out variants (but not + # for out variants, where it is impossible to generate an appropriate + # default) + should_default = not is_out + if isinstance(a, Argument): + default: Optional[str] = None + if should_default and a.default is not None: + default = cpp.default_expr(a.default, a.type, symint=symint) + return [ + Binding( + nctype=argument_type(a, binds=a.name, symint=symint), + name=a.name, + default=default, + argument=a, + ) + ] + elif isinstance(a, SelfArgument): + # Erase SelfArgument from the distinction + return argument(a.argument, is_out=is_out, symint=symint) + elif isinstance(a, TensorOptionsArguments): + default = None + if should_default: + default = "{}" + # TODO: Not sure why the arguments assigned here are for + # TensorOptionsArguments and not the constituent pieces. It seems + # to matter + return [ + Binding( + nctype=NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))), + name="dtype", + default=default, + argument=a, + ), + Binding( + nctype=NamedCType("layout", OptionalCType(BaseCType(layoutT))), + name="layout", + default=default, + argument=a, + ), + Binding( + nctype=NamedCType("device", OptionalCType(BaseCType(deviceT))), + name="device", + default=default, + argument=a, + ), + Binding( + nctype=NamedCType("pin_memory", OptionalCType(BaseCType(boolT))), + name="pin_memory", + default=default, + argument=a, + ), + ] + else: + assert_never(a) + + +def arguments(func: FunctionSchema, *, symint: bool) -> List[Binding]: + args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = [] + args.extend(func.arguments.non_out) + args.extend(func.arguments.out) + return [ + r for arg in args for r in argument(arg, symint=symint, is_out=func.is_out_fn()) + ] diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/python.py b/env-llmeval/lib/python3.10/site-packages/torchgen/api/python.py new file mode 100644 index 0000000000000000000000000000000000000000..bc8fadb09beef5a8efcdc26664ec0df26a8bd12b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/api/python.py @@ -0,0 +1,1481 @@ +from dataclasses import dataclass +from typing import Dict, List, Optional, Sequence, Set, Tuple, Union + +from torchgen.api import cpp + +from torchgen.api.types import Binding, CppSignature, CppSignatureGroup +from torchgen.gen import pythonify_default +from torchgen.model import ( + Argument, + BaseTy, + BaseType, + FunctionSchema, + ListType, + NativeFunction, + OptionalType, + Return, + Type, + Variant, +) + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# +# Data Models +# +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# +# [Notes] python binding codegen +# +# The Python binding codegen produces code that takes the input list of +# PyObjects, finds the matching ATen C++ function using PythonArgParser, +# converts the PyObjects into C++ types and calls the ATen C++ function: +# +# +--------+ parsing +------------------------+ binding +-----------------------+ +# | PyObjs | ---------> | PythonArgParser Output | ---------> | Cpp Function Dispatch | +# +--------+ +------------------------+ +-----------------------+ +# +# The following examples demonstrate the data models the Python binding +# codegen needs to deal with and the tasks it needs to accomplish. It +# helps understand the purpose of the new data types we introduced below. +# +# - Function Schema (source of truth) +# +# aten::empty.names(int[] size, *, Dimname[]? names, +# ScalarType? 
dtype=None, Layout? layout=None, +# Device? device=None, bool? pin_memory=None, +# MemoryFormat? memory_format=None) -> Tensor +# +# - Python Signature +# +# It's used to generate input schema string for PythonArgParser. +# Note: TensorOptions fields are reordered and the additional +# 'requires_grad' field is added: +# +# empty(IntArrayRef size, *, DimnameList? names, +# MemoryFormat? memory_format=None, ScalarType dtype=None, +# Layout layout=torch.strided, Device device=None, +# bool pin_memory=False, bool requires_grad=False) +# +# - C++ Signature +# +# It's used to generate C++ lambda formals & dispatch call. +# Note: the scattered TensorOptions fields are packed into 'options'. +# +# auto dispatch_empty = +# [](IntArrayRef size, c10::optional names, +# const TensorOptions & options, +# c10::optional memory_format) -> Tensor { +# pybind11::gil_scoped_release no_gil; +# return torch::empty(size, names, options, memory_format); +# }; +# +# - Binding between Python Arguments and C++ Arguments +# +# Given a set of Python Arguments in scope, we need produce the +# binding expressions that translate the Python API into C++ API: +# +# Python Args Cpp Args Binding Exprs +# ----------------------------------------------------------------- +# 0: size size '_r.intlist(0)' +# 1: names names 'names' [special init] +# 2: memory_format -------+ +# 3: dtype -----+-|--> options 'options' [special packing] +# 4: layout / | +# 5: device / +--> memory_format '_r.memoryformatOptional(2)' +# 6: pin_memory / +# 7: requires_grad -+ +# +# So the full dispatch expression would look like: +# +# dispatch_empty(_r.intlist(0), names, options, +# _r.memoryformatOptional(2)) +# +# Where does 'names' come from? It involves special local init: +# +# auto __names = _r.toDimnameListOptional(1); +# c10::optional names = +# __names ? c10::make_optional(DimnameList(__names.value())) +# : c10::nullopt; +# +# Where does 'options' come from? It involves special local init +# for TensorOptions. Note that Python side has the additional +# 'requires_grad' field: +# +# const auto options = TensorOptions() +# .dtype(_r.scalartype(3)) +# .device(_r.device(5)) +# .layout(_r.layoutOptional(4)) +# .requires_grad(_r.toBool(7)) +# .pinned_memory(_r.toBool(6)); +# +# In some other cases one Python Argument can map to multiple C++ +# Arguments. For example: +# +# aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) +# -> (Tensor values, Tensor indices) +# +# Python Args Cpp Args Binding Exprs +# --------------------------------------------------------------------- +# +----> max 'out[0]' +# /-----> max_values 'out[1] +# 0: input / self '_r.tensor(0)' +# 1: dim / dim '_r.dimname(1)' +# 2: keepdim / keepdim '_r.toBool(2)' +# 3: out -----+ [local init] out '_r.tensorlist_n<2>(3)' +# +# As demonstrated above, the binding can involve reordering, +# packing, unpacking and special local inits. +# +# +# Let's look at a concrete example: +# +# static PythonArgParser parser({ +# "abs(Tensor input, *, Tensor out=None)", +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# ^ +# +--- Python Schema, represented by PythonSignature and PythonArgument +# +# }, /*traceable=*/true); +# +# ParsedArgs<2> parsed_args; +# auto _r = parser.parse(nullptr, args, kwargs, parsed_args); +# +# ... 
+# +# if (_r.isNone(1)) { +# ~~~~~~~~~~~~ <--- Scattered PythonArgParser output (arg name = 'out') +# represented by PythonArgParserOutputExpr +# +# // aten::abs(Tensor self) -> Tensor +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# ^ +# +--- NativeFunction schema, base version +# +# auto dispatch_abs = [](const Tensor & self) -> Tensor { +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# ^ +# +--- dispatch_lambda_args / dispatch_lambda_return_str +# generated from NativeFunction / CppSignature +# (deprecated PythonSignature is special) +# arguments are represented by DispatchLambdaArgument +# +# pybind11::gil_scoped_release no_gil; +# return self.abs(); +# ~~~~~~~~~~~ <--- cpp_dispatch_target / cpp_dispatch_exprs +# generated from NativeFunction / CppSignature +# }; +# return wrap(dispatch_abs(_r.tensor(0))); +# ~~~~~~~~~~~~~ +# ^ +# +--- dispatch_lambda_exprs +# binding PythonArgParserOutputExpr (python args) +# and DispatchLambdaArgument (c++ args) +# +# } else { +# // aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# ^ +# +--- NativeFunction schema, out-variant +# +# auto dispatch_abs_out = [](Tensor out, const Tensor & self) -> Tensor { +# pybind11::gil_scoped_release no_gil; +# return at::abs_out(out, self); +# }; +# return wrap(dispatch_abs_out(_r.tensor(1), _r.tensor(0))); +# } +# +# +# [Notes] python interface codegen +# The python dataclasses below are used used to generate both python binding code +# and pyi type hint signatures. +# In theory these two should look very similar, but there are number of differences +# in how pyi signatures vs. python_arg_parser signatures are generated. +# These differences have been encapsulated in signature_str() vs. signature_str_pyi() +# to display the full signatures, and argument_str() vs argument_str_pyi() to display arguments. +# For examples, only pyi signatures include return types. + + +@dataclass(frozen=True) +class PythonReturns: + returns: Tuple[Return, ...] + + +@dataclass(frozen=True) +class PythonArgument: + name: str + type: Type + default: Optional[str] + + # Used to generate the default init expr for some PythonArgParser outputs, e.g.: + # + # _r.layoutWithDefault(3, layout_from_backend(self.options().backend()))) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + # ^ + # +--- default_init str + default_init: Optional[str] + + # Compute argument formal for python argument parsing. + # Needs to be consistent with torch/csrc/utils/python_arg_parser.h. + def argument_str(self, *, method: bool = False, symint: bool = True) -> str: + type_str = ( + argument_type_str(self.type, symint=symint) + .replace("const ", "") + .replace(" &", "") + ) + + name = self.name + # s/self/input/ outside method bindings + # [old codegen] TODO: remove this? doesn't rename in codegen, it's just + # for the parse string + if name == "self" and type_str in ["Tensor", "Number"] and not method: + name = "input" + + # add default + if self.default is not None: + default = { + "nullptr": "None", + "c10::nullopt": "None", + "{}": "None", + }.get(self.default, self.default) + return f"{type_str} {name}={default}" + else: + return f"{type_str} {name}" + + def argument_str_pyi( + self, *, method: bool = False, deprecated: bool = False + ) -> str: + type_str = argument_type_str_pyi(self.type) + + name = self.name + # s/self/input/ outside method bindings + # [old codegen] TODO: remove this? 
doesn't rename in codegen, it's just + # for the parse string + if name == "self" and type_str == "Tensor" and not method and not deprecated: + name = "input" + + if name == "from": # from is a Python keyword... + name += "_" + + # pyi merges the _out and functional variants into the same signature, with an optional out arg + if name == "out" and type_str == "Tensor" and not deprecated: + type_str = "Optional[" + type_str + "]" + + # pyi deprecated signatures don't get defaults for their out arg + treat_as_no_default = ( + deprecated + and isinstance(self, PythonOutArgument) + and self.default == "None" + ) + + # add default + if self.default is not None and not treat_as_no_default: + if ( + isinstance(self.type, ListType) + and self.type.elem == BaseType(BaseTy.int) + and self.default.startswith("{") + and self.default.endswith("}") + ): + default = "(" + self.default[1:-1] + ")" + else: + default = { + "nullptr": "None", + "c10::nullopt": "None", + "{}": "None", + "MemoryFormat::Contiguous": "contiguous_format", + "QScheme::PER_TENSOR_AFFINE": "per_tensor_affine", + }.get(self.default, self.default) + return f"{name}: {type_str} = {default}" + else: + return f"{name}: {type_str}" + + +@dataclass(frozen=True) +class PythonOutArgument(PythonArgument): + # In Python signature multiple output fields are packed into one 'out' argument. + # When binding to C++, it's first binded to a local 'out' variable: + # 'auto out = _r.tensorlist_n<2>(2);', + # then binded to scattered C++ output arguments as 'out[0]', 'out[1]', and etc. + # TODO: maybe don't need keep scattered out fields for python signature? + outputs: Tuple[PythonArgument, ...] + + @staticmethod + def from_outputs( + outputs: Tuple[PythonArgument, ...] + ) -> Optional["PythonOutArgument"]: + if not outputs: + return None + + size = len(outputs) + if size == 1: + return PythonOutArgument( + name=outputs[0].name, + type=outputs[0].type, + default="None", + default_init=None, + outputs=outputs, + ) + elif size > 1: + if any(not a.type.is_tensor_like() for a in outputs): + raise RuntimeError(f"Unsupported output type: {outputs}") + return PythonOutArgument( + name="out", + # TODO: shouldn't this be OptionalType[ListType[...]], since it defaults to None? + type=ListType(BaseType(BaseTy.Tensor), size), + default="None", + default_init=None, + outputs=outputs, + ) + raise AssertionError(r"Unexpected PythonOutArgument size") + + +@dataclass(frozen=True) +class PythonSignature: + # Base operator name, without inplace/outplace suffix. + name: str + + # Positional arguments. + # TODO: create a dedicated SelfArgument type for 'self'? + input_args: Tuple[PythonArgument, ...] + + # Keyword arguments excluding the 'out' argument and scattered kwargs belonging + # to TensorOptions (dtype, layout, device, pin_memory, requires_grad, etc). + input_kwargs: Tuple[PythonArgument, ...] + + output_args: Optional[PythonOutArgument] + + # Return types, which are only used by pyi + returns: PythonReturns + + # These are scattered kwargs arguments belonging to TensorOptions. + # When binding to C++, they are packed into a TensorOptions object 'options'. + # It's possible that the C++ signature doesn't take TensorOptions object (e.g. + # for out variant), in which case they will be used as scattered fields without + # being packed into 'options'. + # TODO: maybe create a PythonTensorOptionsArgument? + tensor_options_args: Tuple[PythonArgument, ...] + + # method or function signature? 
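+    # (True when generating a Tensor method binding, in which case `self` is
+    # implicit and not part of input_args; False for torch.* functions.)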
+ method: bool + + @property + def deprecated(self) -> bool: + return False + + def arguments( + self, *, skip_outputs: bool = False, skip_tensor_options: bool = False + ) -> Tuple[Union[PythonArgument, PythonOutArgument], ...]: + result: List[Union[PythonArgument, PythonOutArgument]] = [] + result.extend(self.input_args) + result.extend(self.input_kwargs) + if self.output_args is not None and not skip_outputs: + result.append(self.output_args) + if not skip_tensor_options: + result.extend(self.tensor_options_args) + return tuple(result) + + def arguments_count(self) -> int: + return len(self.arguments()) + + def output_idx(self) -> int: + return len(self.input_args) + len(self.input_kwargs) + + # [old codegen] Compute the Python function signature for argument parsing, + # as specified in torch/csrc/utils/python_arg_parser.h. WARNING: + # this is NOT the same type signature as specified by PEP 484 + # as understood by mypy; our format was independently developed + # and has some quirks to make it more suitable specifically + # for error parsing. + # + # For a translation to mypy-valid type signatures, see + # signature_str_pyi(). + def signature_str(self, *, skip_outputs: bool = False, symint: bool = True) -> str: + args = self.arguments(skip_outputs=skip_outputs) + schema_formals: List[str] = [ + a.argument_str(method=self.method, symint=symint) for a in args + ] + positional_argc = len(self.input_args) + if len(schema_formals) > positional_argc: + schema_formals.insert(positional_argc, "*") + + return f'{self.name}({", ".join(schema_formals)})' + + def signature_str_pyi(self, *, skip_outputs: bool = False) -> str: + args = self.arguments(skip_outputs=skip_outputs) + schema_formals: List[str] = [ + a.argument_str_pyi(method=self.method) for a in args + ] + positional_argc = len(self.input_args) + if len(schema_formals) > positional_argc: + schema_formals.insert(positional_argc, "*") + + # only pyi signatures include returns + returns_str = returns_str_pyi(self) + # pyi also includes self (with no typing/defaults) for methods + if self.method: + schema_formals.insert(0, "self") + return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...' + + def signature_str_pyi_vararg(self, *, skip_outputs: bool = False) -> Optional[str]: + # only pyi uses vararg signatures + args = self.arguments(skip_outputs=skip_outputs) + schema_formals: List[str] = [ + a.argument_str_pyi(method=self.method) for a in args + ] + # vararg only applies to pyi signatures. vararg variants are not generated for all signatures + num_args = self.arguments_count() + num_positionalargs = len(self.input_args) + + have_vararg_version = False + if num_args > 0: + vararg_type = args[0].type + if ( + isinstance(vararg_type, ListType) + and str(vararg_type.elem) in ["int", "SymInt"] + and num_positionalargs == 1 + ): + have_vararg_version = True + + if not have_vararg_version: + return None + # Below are the major changes in vararg vs. regular pyi signatures + # vararg signatures also omit the asterix + schema_formals[0] = "*" + args[0].name + ": _int" + + returns_str = returns_str_pyi(self) + # pyi also includes self (with no typing/defaults) for methods + if self.method: + schema_formals.insert(0, "self") + return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...' + + +# The deprecated python signature involves some special logic, so create a +# dedicated data model to store these extra properties. 
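+# (For reference: for a schema such as
+#  aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+#  the non-deprecated parser string is "add(Tensor input, Tensor other, *, Scalar alpha=1)";
+#  the deprecated variant below renders the same way, with "|deprecated" appended
+#  by its signature_str override.)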
+@dataclass(frozen=True) +class PythonSignatureDeprecated(PythonSignature): + # Schema for the deprecated function + deprecated_schema: FunctionSchema + + # The deprecated signature might miss some arguments that the corresponding + # C++ signature expects. We need store the constant default values to pass in. + # For example: + # [deprecate signature]: addmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2) + # [func schema]: aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + # [func call]: self.addmm(mat1, mat2, beta, 1) + # We store ['self', 'mat1', 'mat2', 'beta', '1'] in this case. + deprecated_args_exprs: Tuple[str, ...] + + @property + def deprecated(self) -> bool: + return True + + def signature_str(self, *, skip_outputs: bool = False, symint: bool = True) -> str: + return ( + PythonSignature.signature_str( + self, skip_outputs=skip_outputs, symint=symint + ) + + "|deprecated" + ) + + def signature_str_pyi(self, *, skip_outputs: bool = False) -> str: + args = self.arguments(skip_outputs=skip_outputs) + schema_formals: List[str] = [ + a.argument_str_pyi(method=self.method, deprecated=True) for a in args + ] + positional_argc = len(self.input_args) + if len(schema_formals) > positional_argc: + schema_formals.insert(positional_argc, "*") + + returns_str = returns_str_pyi(self) + return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...' + + def signature_str_pyi_vararg(self, *, skip_outputs: bool = False) -> Optional[str]: + # the codegen doesn't include vararg variants for deprecated signatures + return None + + +# This struct is used to hold the PythonSignature and its corresponding +# NativeFunction BEFORE grouping base and out-variant functions. +# Why not store NativeFunction in PythonSignature or construct PythonSignature +# from NativeFunction? Because they are not 1-1 mapped. +# One native function could have both deprecated and non-deprecated python +# signatures - NativeFunction doesn't contain information to construct the +# deprecated python signature. +# One python signature is used to handle both the base and the out-variant +# function - see 'PythonSignatureGroup'. +@dataclass(frozen=True) +class PythonSignatureNativeFunctionPair: + signature: PythonSignature + function: NativeFunction + + +# We merge pairs of functions with signatures that are equivalent mod +# output arguments, and use a single entry in the python_arg_parser sig +# list for both (output arguments become optional). +@dataclass(frozen=True) +class PythonSignatureGroup: + # The signature used for Python argument parsing. The outplace signature + # is preferred if exists, because it can be used to parse inputs for both + # the out-place variant and the base version (with output omitted). + signature: PythonSignature + + # The regular ATen declaration (e.g. conv2d) + base: NativeFunction + + # The out variant (e.g. conv2d_out) + outplace: Optional[NativeFunction] + + @classmethod + def from_pairs( + cls, + functional: PythonSignatureNativeFunctionPair, + out: Optional[PythonSignatureNativeFunctionPair], + ) -> "PythonSignatureGroup": + if out is None: + return PythonSignatureGroup( + signature=functional.signature, + base=functional.function, + outplace=None, + ) + + # prefer the signature with optional out=... arguments because it's the + # superset that can be used to parse input for both base and outplace. 
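+        # (e.g. abs and abs.out are parsed with the single string
+        #  "abs(Tensor input, *, Tensor out=None)" shown in the notes above)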
+ signature_kwargs = out.signature.__dict__.copy() + + # Out overloads in C++ don't have TensorOptions arguments, + # so take these from the functional variant + signature_kwargs[ + "tensor_options_args" + ] = functional.signature.tensor_options_args + + return PythonSignatureGroup( + signature=type(out.signature)(**signature_kwargs), + base=functional.function, + outplace=out.function, + ) + + +# C++ function dispatch is wrapped in a lambda function. The lambda function +# has almost the same signature as the C++ function, only with some small +# variants - see details below. +# This data model is used to represent arguments of the lambda function +# signature. +@dataclass(frozen=True) +class DispatchLambdaArgument: + name: str + type_str: str + is_out_arg: bool + + +# To pass PyObjects arguments to C++ function (via the lambda wrapper), +# we need first convert PyObjects into simple C++ objects. This work +# is done by PythonArgParser. +# This data model is used to represent the output of PythonArgParser. +# It has 1-1 mapping with PythonArgument in PythonSignature. +@dataclass(frozen=True) +class PythonArgParserOutputExpr: + # argument name + name: str + + # RHS expression to reference PythonArgParser output. + expr: str + + # In some special cases we need create different expr, e.g.: + # '_r.isNone(1)' instead of '_r.tensor(1)'. + index: int + + # The python argument it maps to. + argument: PythonArgument + + @property + def is_none_expr(self) -> str: + return f"_r.isNone({self.index})" + + +# To pass PythonArgParser output to the lambda wrapper, we need bind +# PythonArgParserOutputExpr to DispatchLambdaArgument. +# They are not always 1-1 mapped, e.g. scattered TensorOptions fields +# need be packed into a TensorOptions object, which is the argument +# that the lambda function wrapper takes. +@dataclass(frozen=True) +class DispatchLambdaArgumentExprs: + # The exprs that provide the binding for lambda arguments, e.g.: + # + # 'self' -> '_r.tensor(0)' + # 'min' -> 'out[0]' / 'min_indices' -> 'out[1]' + # 'options' -> 'options' + # + # It has 1-1 mapping with DispatchLambdaArgument. + exprs: Sequence[str] + + # Special local inits, which might introduce new variables that + # the 'exprs' above reference, e.g.: + # + # 'auto out = _r.tensorlist_n<2>(2);' + # + inits: Sequence[str] + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# +# Helper Functions +# +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def _cpp_signature(f: NativeFunction, *, method: bool = False) -> CppSignature: + return CppSignatureGroup.from_native_function(f, method=method).signature + + +def has_tensor_options(f: NativeFunction) -> bool: + return f.func.arguments.tensor_options is not None + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# +# Python Signature +# +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +# 'simple_type' was introduced by the old codegen, which is slightly +# different from the python schema type, e.g.: doesn't have '?' suffix +# for optional Tensor/TensorList; doesn't have '[size]' suffix for list type. 
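+# (For instance, with simple_type=True a sized list such as 'int[2]' is rendered
+#  as "IntArrayRef" rather than "IntArrayRef[2]", and optional tensor lists lose
+#  their const-reference wrapping.)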
+def argument_type_str( + t: Type, *, simple_type: bool = False, symint: bool = True +) -> str: + if isinstance(t, BaseType): + if t.name == BaseTy.Tensor: + return "Tensor" + elif t.name == BaseTy.int: + return "int64_t" + elif t.name == BaseTy.float: + return "double" + elif t.name == BaseTy.str: + return "c10::string_view" + elif t.name in [ + BaseTy.bool, + BaseTy.QScheme, + BaseTy.Scalar, + BaseTy.ScalarType, + BaseTy.Generator, + BaseTy.Storage, + BaseTy.Layout, + BaseTy.Device, + BaseTy.DeviceIndex, + BaseTy.MemoryFormat, + BaseTy.Dimname, + BaseTy.Stream, + BaseTy.ConstQuantizerPtr, + BaseTy.SymInt, + ]: + # These python schema type names line up with their function schema names + return t.name.name + + elif isinstance(t, OptionalType): + if str(t.elem) == "Tensor": + # Is it desired to keep '?' for simple_type with new style dispatcher? + return "Tensor?" + elem = argument_type_str(t.elem, simple_type=simple_type, symint=symint) + return f"{elem}?" + elif isinstance(t, ListType): + size = t.size if not simple_type else None + if str(t.elem) == "bool": + assert t.size is not None + return f"::std::array" + elif str(t.elem) == "int": + return f"IntArrayRef[{size}]" if size is not None else "IntArrayRef" + elif str(t.elem) == "SymInt": + if symint: + return ( + f"SymIntArrayRef[{size}]" if size is not None else "SymIntArrayRef" + ) + else: + return f"IntArrayRef[{size}]" if size is not None else "IntArrayRef" + elif str(t.elem) == "Tensor": + return f"TensorList[{size}]" if size is not None else "TensorList" + elif str(t.elem) == "Scalar": + return f"ScalarList[{size}]" if size is not None else "ScalarList" + elif str(t.elem) == "Tensor?": + if simple_type: + return "c10::List>" + else: + return "const c10::List> &" + elif str(t.elem) == "Dimname": + return f"DimnameList[{size}]" if size is not None else "DimnameList" + elem = argument_type_str(t.elem, simple_type=simple_type, symint=symint) + return f"ArrayRef<{elem}>" + + raise RuntimeError(f"unrecognized type {repr(t)}") + + +def argument_type_size(t: Type) -> Optional[int]: + l = t.is_list_like() + if l is not None and str(l.elem) != "bool": + return l.size + else: + return None + + +def argument(a: Argument) -> PythonArgument: + return PythonArgument( + name=a.name, + type=a.type, + # TODO: directly translate a.default to python default + default=str( + pythonify_default(cpp.default_expr(a.default, a.type, symint=False)) + ) + if a.default is not None + else None, + default_init=None, + ) + + +# Generates a PythonSignature that can be used for either .pyi or PythonArgParser codegen +def signature( + f: NativeFunction, *, method: bool = False, pyi: bool = False +) -> PythonSignature: + return signature_from_schema( + f.func, category_override=f.category_override, method=method, pyi=pyi + ) + + +def signature_from_schema( + func: FunctionSchema, + *, + category_override: Optional[str], + method: bool = False, + pyi: bool = False, +) -> PythonSignature: + args: List[Argument] = [] + args.extend(func.arguments.pre_self_positional) + # Skip SelfArgument if this is method. + if not method and func.arguments.self_arg is not None: + args.append(func.arguments.self_arg.argument) + args.extend(func.arguments.post_self_positional) + args.extend(func.arguments.pre_tensor_options_kwarg_only) + # Skip TensorOptionsArguments. Python side TensorOptions + # arguments are created based on different rules - see below. 
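+    # (For factory-like ops, scattered dtype/layout/device/pin_memory arguments
+    #  plus the python-only 'requires_grad' are synthesized as tensor_options_args
+    #  further below rather than taken from the schema here.)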
+ args.extend(func.arguments.post_tensor_options_kwarg_only) + args.extend(func.arguments.out) + + input_arg_set = {a.name for a in func.arguments.flat_positional} + kwarg_only_set = {a.name for a in func.arguments.flat_kwarg_only} + out_arg_set = {a.name for a in func.arguments.out} + + input_args = tuple(map(argument, filter(lambda a: a.name in input_arg_set, args))) + input_kwargs = tuple( + map(argument, filter(lambda a: a.name in kwarg_only_set, args)) + ) + outputs = tuple(map(argument, filter(lambda a: a.name in out_arg_set, args))) + + # Reintroduce the scattered fields of TensorOptions for Python. + # Compared to the cpp counterpart, the python arguments have new property + # (default_init) and a new argument 'requires_grad', which require some + # special handlings. + # [old codegen] TODO: because these aren't guaranteed to be 100% faithful + # to the original versions in the yaml, this recreation is a potential + # source of drift between eager and JIT. Pull this logic out to a shared place. + + has_tensor_input_arg = any( + a.type.is_tensor_like() for a in func.arguments.flat_non_out + ) + if any(a.name == "requires_grad" for a in func.schema_order_arguments()): + raise ValueError( + "argument named requires_grad is reserved, should not explicitly add it in the schema" + ) + + # [old codegen] this probably won't work if one of the returns is not a tensor, + # but it will produce a compile-time error that is obvious. + has_tensor_return = any(r.type.is_tensor_like() for r in func.returns) + + name: str = cpp.name(func) + is_factory_function = category_override == "factory" or ( + has_tensor_return and not has_tensor_input_arg + ) + is_like_or_new_function = ( + category_override in ("new", "like") + or name.startswith("new_") + or name.endswith("_like") + ) + + tensor_options_args: List[PythonArgument] = [] + if is_factory_function or is_like_or_new_function: + + def topt_default_init(name: str) -> Optional[str]: + topt_args = func.arguments.tensor_options + if topt_args is None: + return None + a = getattr(topt_args, name) + if a.default is None or a.default == "None": + return None + return cpp.default_expr(a.default, a.type, symint=False) + + tensor_options_args.append( + PythonArgument( + name="dtype", + type=OptionalType(BaseType(BaseTy.ScalarType)), + default="None", + default_init=( + None if is_like_or_new_function else topt_default_init("dtype") + ), + ) + ) + tensor_options_args.append( + PythonArgument( + name="layout", + type=OptionalType(BaseType(BaseTy.Layout)), + default="None", + default_init=( + None if is_like_or_new_function else topt_default_init("layout") + ), + ) + ) + tensor_options_args.append( + PythonArgument( + name="device", + type=OptionalType(BaseType(BaseTy.Device)), + default="None", + default_init=( + None + if is_like_or_new_function + else ( + topt_default_init("device") + or "torch::tensors::get_default_device()" + ) + ), + ) + ) + tensor_options_args.append( + PythonArgument( + name="pin_memory", + type=OptionalType(BaseType(BaseTy.bool)), + default="False", + default_init=None, + ) + ) + tensor_options_args.append( + PythonArgument( + name="requires_grad", + type=OptionalType(BaseType(BaseTy.bool)), + default="False", + default_init=None, + ) + ) + + returns = PythonReturns(returns=func.returns) + + return PythonSignature( + name=str(func.name.name), + input_args=input_args, + input_kwargs=input_kwargs, + output_args=PythonOutArgument.from_outputs(outputs), + tensor_options_args=tuple(tensor_options_args), + returns=returns, + method=method, 
+ ) + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# +# Python Interface +# +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def namedtuple_fieldnames(returns: Tuple[Return, ...]) -> List[str]: + if len(returns) <= 1 or all(r.name is None for r in returns): + return [] + else: + if any(r.name is None for r in returns): + # When building on Windows, `PyStructSequence_UnnamedField` could not be + # resolved by the linker for some reason, which cause error in building: + # + # python_nn_functions.cpp.obj : error LNK2001: unresolved external symbol + # PyStructSequence_UnnamedField + # + # Thus, at this point in time, we do not support unnamed + # fields in namedtuple; you must either name all fields, + # or none of them. + raise ValueError("Unnamed field is not supported by codegen") + + return [str(r.name) for r in returns] + + +def argument_type_str_pyi(t: Type) -> str: + add_optional = False + if isinstance(t, OptionalType): + t = t.elem + add_optional = True + + if isinstance(t, BaseType): + if t.name in [BaseTy.int, BaseTy.DeviceIndex]: + ret = "_int" + if t.name == BaseTy.SymInt: + ret = "Union[_int, SymInt]" + elif t.name == BaseTy.float: + ret = "_float" + elif t.name == BaseTy.str: + ret = "str" + elif t.name == BaseTy.Scalar: + ret = "Union[Number, _complex]" + elif t.name == BaseTy.ScalarType: + ret = "_dtype" + elif t.name == BaseTy.bool: + ret = "_bool" + elif t.name == BaseTy.QScheme: + ret = "_qscheme" + elif t.name == BaseTy.Layout: + ret = "_layout" + elif t.name == BaseTy.Device: + ret = "Optional[DeviceLikeType]" + elif t.name == BaseTy.MemoryFormat: + ret = "memory_format" + elif t.name == BaseTy.Dimname: + ret = "Union[str, ellipsis, None]" + elif t.name == BaseTy.Storage: + ret = "Union[Storage, UntypedStorage]" + elif t.name in [BaseTy.Tensor, BaseTy.Generator, BaseTy.Stream]: + # These python schema type names line up with their function schema names + ret = t.name.name + + elif isinstance(t, ListType): + if str(t.elem) == "int": + ret = "Union[_int, _size]" if t.size is not None else "_size" + elif t.is_tensor_like(): + # TODO: this doesn't seem right... 
+ # Tensor?[] currently translates to Optional[Union[Tuple[Tensor, ...], List[Tensor]]] + # It should probably translate to Union[Tuple[Optional[Tensor], ...], List[Optional[Tensor]]] + if isinstance(t.elem, OptionalType): + add_optional = True + ret = ( + "Union[Tensor, Tuple[Tensor, ...], List[Tensor]]" + if t.size is not None + else "Union[Tuple[Tensor, ...], List[Tensor]]" + ) + elif str(t.elem) == "float": + ret = "Sequence[_float]" + elif str(t.elem) == "SymInt" and t.size is not None: + elem = argument_type_str_pyi(t.elem) + ret = f"Union[{elem}, Sequence[{elem}]]" + else: + elem = argument_type_str_pyi(t.elem) + ret = f"Sequence[{elem}]" + + else: + raise RuntimeError(f"unrecognized type {repr(t)}") + + if add_optional: + ret = "Optional[" + ret + "]" + + return ret + + +def return_type_str_pyi(t: Type) -> str: + # Where arguments are open to accepting Union, return types should return + # concrete types + + if isinstance(t, OptionalType): + inner = return_type_str_pyi(t.elem) + return f"Optional[{inner}]" + + if isinstance(t, BaseType): + if t.name == BaseTy.Device: + return "_device" + elif t.name == BaseTy.Dimname: + ret = "Optional[str]" + else: + return argument_type_str_pyi(t) + + if isinstance(t, ListType): + inner = return_type_str_pyi(t.elem) + return f"List[{inner}]" + + return argument_type_str_pyi(t) + + +def returns_named_tuple_pyi(signature: PythonSignature) -> Optional[Tuple[str, str]]: + python_returns = [return_type_str_pyi(r.type) for r in signature.returns.returns] + namedtuple_name = signature.name + field_names = namedtuple_fieldnames(signature.returns.returns) + if field_names: + namedtuple_def_lines = [f"class {namedtuple_name}(NamedTuple):"] + namedtuple_def_lines.extend( + f" {name}: {typ}" for name, typ in zip(field_names, python_returns) + ) + namedtuple_def_lines.append("") # add an extra newline + namedtuple_def = "\n".join(namedtuple_def_lines) + # Example: + # namedtuple_def = ( + # "class max(NamedTuple):\n" + # " values: Tensor\n" + # " indices: Tensor\n" + # ) + return namedtuple_name, namedtuple_def + return None + + +def returns_str_pyi(signature: PythonSignature) -> str: + field_names = namedtuple_fieldnames(signature.returns.returns) + if field_names: + return f"torch.return_types.{signature.name}" + + python_returns = [return_type_str_pyi(r.type) for r in signature.returns.returns] + if len(python_returns) > 1: + return "Tuple[" + ", ".join(python_returns) + "]" + if len(python_returns) == 1: + return python_returns[0] + return "None" + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# +# C++ Function Dispatch +# +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# This section provides APIs to generate the code that does C++ function +# dispatch. The C++ function call is wrapped by a lambda function. +# For example: +# +# // aten::selu_(Tensor(a!) self) -> Tensor(a!) +# auto dispatch_selu_ = [](Tensor self) -> Tensor { +# pybind11::gil_scoped_release no_gil; +# return at::selu_(self); +# }; +# +# The lambda function's signature follows the C++ signature in common +# cases, e.g.: +# +# // aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor +# [](const Tensor & self, const Tensor & other, Scalar alpha) -> Tensor +# +# For out variant the 'out' argument's type is changed from 'Tensor &' +# to 'Tensor'. It's because when calling the lambda it passes in the +# PythonArgParser output '_r.tensor(3)', which is stack allocated object +# and needs to pass by value. 
Also see comments in 'dispatch_lambda_return_str()'. +# +# // aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +# [](Tensor out, const Tensor & self, const Tensor & other, Scalar alpha) -> Tensor +# +# For multi-output case it can keep using reference type because the +# PythonArgParser output has been unpacked to local variables, e.g.: +# +# // aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, +# // Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) +# [](Tensor & max, Tensor & max_values, const Tensor & self, Dimname dim, bool keepdim) -> std::tuple +# +# For deprecated python signature, it should follow deprecated python arg order. +# TODO: This is to keep same byte-for-byte result as the old codegen - maybe unnecessary? + + +def dispatch_lambda_args( + ps: PythonSignature, f: NativeFunction, symint: bool = True +) -> Tuple[DispatchLambdaArgument, ...]: + if isinstance(ps, PythonSignatureDeprecated): + schema = ps.deprecated_schema + else: + schema = f.func + + # Start with cpp arguments - dispatch lambda signature always include 'self' + cpp_args = cpp.arguments( + arguments=schema.arguments, + faithful=False, + symint=symint, + method=False, + cpp_no_default_args=f.cpp_no_default_args, + ) + out_args: Set[str] = {a.name for a in schema.arguments.out} + + # Convert from cpp argument to lambda argument + def dispatch_lambda_arg(cpp_arg: Binding) -> DispatchLambdaArgument: + type_str = cpp_arg.type + is_out_arg = cpp_arg.name in out_args + if ps.method and cpp_arg.name == "self": + # For method's 'self', we can use 'const Tensor &' and simply ignore mutability! + type_str = "const at::Tensor &" + else: + # For other cases we need prevent dangling refs to temps (unless it's + # unpacked scattered output) + # The reason is explained in the comments above and in 'dispatch_lambda_return_str()'. + # TODO: avoid this special handling? + ensure_temp_safe = len(out_args) <= 1 or not is_out_arg + if ensure_temp_safe: + type_str = { + "at::Tensor &": "at::Tensor", + }.get(type_str, type_str) + return DispatchLambdaArgument( + name=cpp_arg.name, + type_str=type_str, + is_out_arg=is_out_arg, + ) + + return tuple(map(dispatch_lambda_arg, cpp_args)) + + +# [old codegen] XXX: if you got here because of an assertion failure, it doesn't mean +# it's enough to just extend the list here. Before you do this, make sure +# to add an appropriate wrap() overload in torch/csrc/autograd/utils/wrap_outputs.h. +SUPPORTED_RETURN_TYPES = { + "at::Tensor", + "::std::tuple", + "::std::tuple", + "::std::tuple", + "::std::tuple", + "::std::tuple", + "::std::tuple", + "::std::tuple", + "::std::tuple", + "::std::tuple", + "::std::tuple", + "::std::tuple>", + "::std::vector", + # Needed for flash attention forw/backward + "::std::tuple", + "at::Scalar", + "bool", + "int64_t", + "void*", + "void", + "at::QScheme", + "double", + "at::IntArrayRef", + "at::ScalarType", + "at::Stream", +} + + +def dispatch_lambda_return_str(f: NativeFunction) -> str: + # [old codegen] Remove type annotation (e.g. 'Tensor' rather than 'Tensor &') + # because the dispatch lambdas take mutable arguments *by value*, not + # by reference. If you then return a reference to such an argument, you + # will now have a pointer to a dangling stack entry. Not good. 
+ # + # You want: + # + # auto dispatch_selu_ = [](Tensor self) -> Tensor { ...; return at::selu_(self); }; + # ^^^^^^ + # + # *not* + # + # auto dispatch_selu_ = [](Tensor self) -> Tensor& { ...; return at::selu_(self); }; + # ^^^^^^^ + # + # (NB: We can't make dispatch_selu_ take Tensor&, because the enclosing + # codegen looks like dispatch_selu_(_r.tensor(0)), and you can't take a + # mutable reference to temporary. Maybe we could assign it to a + # variable itself.) + returns_without_annotation = tuple( + Return(r.name, r.type, None) for r in f.func.returns + ) + return_str = cpp.returns_type(returns_without_annotation, symint=True).cpp_type() + if return_str not in SUPPORTED_RETURN_TYPES: + raise RuntimeError(f"{f.func.name} returns unsupported type {return_str}") + return return_str + + +def cpp_dispatch_target(f: NativeFunction) -> str: + symint = f.func.has_symint() + name = cpp.name(f.func, symint_overload=symint) + if Variant.method in f.variants: + return f"self.{name}" + if Variant.function in f.variants: + if has_tensor_options(f) or f.func.name.name.base.endswith("_like"): + namespace = "torch" + else: + namespace = "at" + return f"{namespace}::{name}" + raise RuntimeError(f"could not dispatch, neither function nor method: {f.func}") + + +def cpp_dispatch_exprs( + f: NativeFunction, + *, + python_signature: Optional[PythonSignature] = None, +) -> Tuple[str, ...]: + cpp_args: Sequence[Binding] = _cpp_signature(f, method=False).arguments() + + exprs: Tuple[str, ...] = tuple() + if not isinstance(python_signature, PythonSignatureDeprecated): + # By default the exprs are consistent with the C++ signature. + exprs = tuple(a.name for a in cpp_args) + else: + # For deprecated python signature we may need fill in some constants. + exprs = tuple( + filter( + lambda n: n != "out" or f.func.is_out_fn(), + python_signature.deprecated_args_exprs, + ) + ) + + if Variant.method in f.variants: + exprs = tuple(filter("self".__ne__, exprs)) + + return exprs + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# +# Python / C++ Args Binding +# +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +# We explicitly enumerate the PythonArgParser unpacking methods for all +# supported types. This might be more verbose than necessary, partially +# because of the irregularity of unpacking method naming, partially +# because we want to mimic the old codegen behavior - to reject +# unexpected and/or unsupported cases which the old codegen rejects. +# For certain cases it is intentionally more restrictive than necessary, +# e.g.: it doesn't accepts doublelist with definite size. 
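+# Illustrative examples (not part of the original file) of the mapping
+# implemented below, from schema type (plus default info) to the
+# PythonArgParser accessor name:
+#
+#   Tensor                          -> 'tensor'      => _r.tensor(i)
+#   int                             -> 'toInt64'     => _r.toInt64(i)
+#   ScalarType? (with default_init) -> 'scalartypeWithDefault'
+#   Tensor[]                        -> 'tensorlist'  (or 'tensorlist_n<N>'
+#                                      when the list has a definite size)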
+def arg_parser_unpack_method( + t: Type, default: Optional[str], default_init: Optional[str], *, symint: bool = True +) -> str: + has_default_init = default_init is not None + if has_default_init and str(t) not in ( + "ScalarType?", + "ScalarType", + "Device", + "Device?", + "Layout", + "Layout?", + "bool", + "bool?", + ): + raise RuntimeError(f"type '{t}' does not supported unpacking with default") + + if isinstance(t, BaseType): + if t.name in [ + BaseTy.Tensor, + BaseTy.Stream, + BaseTy.Storage, + BaseTy.Scalar, + BaseTy.Dimname, + ]: + # These unpack methods line up with their schema names + return t.name.name.lower() + elif t.name == BaseTy.ScalarType: + return "scalartypeWithDefault" if has_default_init else "scalartype" + elif t.name == BaseTy.Device: + return "deviceWithDefault" if has_default_init else "device" + elif t.name == BaseTy.DeviceIndex: + return "toInt64" + elif t.name == BaseTy.int: + return "toInt64" + elif t.name == BaseTy.SymInt: + return "toSymInt" if symint else "toInt64" + elif t.name == BaseTy.bool: + return "toBoolWithDefault" if has_default_init else "toBool" + elif t.name == BaseTy.float: + return "toDouble" + elif t.name == BaseTy.str: + return "stringView" + elif t.name == BaseTy.Layout: + return "layoutWithDefault" if has_default_init else "layout" + elif t.name == BaseTy.MemoryFormat: + return "memoryformat" + + elif isinstance(t, OptionalType): + if str(t.elem) == "Tensor": + return "optionalTensor" + elif str(t.elem) == "Generator": + return "generator" + elif str(t.elem) == "Dimname[]": + return "toDimnameListOptional" + elif not has_default_init and default in (None, "None", "c10::nullopt"): + # If default is None: append 'Optional' to elem's unpacking method + return ( + arg_parser_unpack_method(t.elem, None, None, symint=symint) + "Optional" + ) + else: + # Otherwise, load as underlying type with default + return arg_parser_unpack_method( + t.elem, default, default_init, symint=symint + ) + + elif isinstance(t, ListType): + if str(t.elem) == "Tensor": + # accept and use definite size + return f"tensorlist_n<{t.size}>" if t.size is not None else "tensorlist" + elif str(t.elem) == "Tensor?": + return "list_of_optional_tensors" + elif str(t.elem) == "Dimname": + # accept definite size + return "dimnamelist" + elif str(t.elem) == "int": + # accept definite size + return "intlist" + elif str(t.elem) == "float": + return "doublelist" + elif str(t.elem) == "SymInt": + # accept definite size + return "symintlist" if symint else "intlist" + elif str(t.elem) == "Scalar": + return "scalarlist" + raise RuntimeError(f"type '{t}' is not supported by PythonArgParser") + + +# Return RHS expression for python argument using PythonArgParser output. +# e.g. for arg name 'foo', arg type 'bool', arg_index = 2, returns '_r.toBool(2)' +def arg_parser_output_expr( + arg_index: int, a: PythonArgument, *, symint: bool = True +) -> PythonArgParserOutputExpr: + has_default = a.default_init is not None + unpack_method = arg_parser_unpack_method( + t=a.type, default=a.default, default_init=a.default_init, symint=symint + ) + default = f", {a.default_init}" if has_default else "" + expr = f"_r.{unpack_method}({arg_index}{default})" + + return PythonArgParserOutputExpr( + name=a.name, + expr=expr, + index=arg_index, + argument=a, + ) + + +# Returns a map with key = arg_name and value = PythonArgParserOutputExpr. 
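+# Illustrative example (not in the original source): for a hypothetical
+# signature '(Tensor self, Scalar alpha)' this would produce roughly
+#
+#   {'self': _r.tensor(0), 'alpha': _r.scalar(1)}
+#
+# where each value is the PythonArgParserOutputExpr wrapping that expression.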
+def arg_parser_output_exprs( + ps: PythonSignature, f: NativeFunction, *, symint: bool = True +) -> Dict[str, PythonArgParserOutputExpr]: + return { + e.name: e + for i, a in enumerate(ps.arguments()) + for e in (arg_parser_output_expr(i, a, symint=symint),) + } + + +# argument name to type for scattered tensor options fields +TENSOR_OPTIONS_FIELDS = { + "dtype": "ScalarType?", + "device": "Device?", + "layout": "Layout?", + "pin_memory": "bool?", + "requires_grad": "bool?", +} + + +# bind arg parser outputs (python args) with dispatch lambda arguments (c++ args). +def dispatch_lambda_exprs( + ps: PythonSignature, f: NativeFunction, *, symint: bool = True +) -> DispatchLambdaArgumentExprs: + # This method is to bind 'arg_parser_outputs' and 'lambda_args' by producing + # 'inits' and 'lambda_args_exprs' for each lambda argument using arg parser + # outputs. + arg_parser_outputs = arg_parser_output_exprs(ps, f, symint=symint) + lambda_args = dispatch_lambda_args(ps, f, symint=symint) + inits: List[str] = [] + lambda_args_exprs: Dict[str, str] = {} + + has_toptions = has_tensor_options(f) + + # 1. special inits/unpacking to provide binding exprs for lambda arguments. + for a in ps.arguments(skip_tensor_options=True): + name = a.name + arg_parser_expr = arg_parser_outputs[a.name].expr + + if has_toptions and name == "self": + # TODO: why this needs to be special case? + inits.extend( + [ + f"auto self = {arg_parser_expr};", + ] + ) + lambda_args_exprs[name] = name + elif ( + isinstance(a, PythonOutArgument) + and len(a.outputs) > 1 + and f.func.is_out_fn() + ): + inits.extend( + [ + f"auto out = {arg_parser_expr};", + ] + ) + for i, out_arg in enumerate(a.outputs): + lambda_args_exprs[out_arg.name] = f"out[{i}]" + elif str(a.type) == "Dimname[]?": + # [old codegen] + # TODO: make this part of something more general, or get rid of it. + # optional> are special. The PythonArgParser returns an + # optional>, which cannot be implicitly converted to + # optional>. One needs to unwrap the optional and rewrap. + inits.extend( + [ + f"auto __{name} = {arg_parser_expr};", + f"c10::optional {name} = __{name} ? c10::make_optional(DimnameList(__{name}.value())) : c10::nullopt;", # noqa: B950 + ] + ) + lambda_args_exprs[name] = name + else: + # default case - directly using PythonArgParser output expr + lambda_args_exprs[name] = arg_parser_expr + + # method's self is passed directly to python binding, rather than parsed + if ps.method: + lambda_args_exprs["self"] = "self" + + # 2. special packing/checking for TensorOptions. 
+ tensor_options_args_names = [a.name for a in ps.tensor_options_args] + if has_toptions: + if f.func.is_out_fn(): + raise RuntimeError(f"{f.func}: tensor options with output arg") + for a in ps.tensor_options_args: + if a.name not in TENSOR_OPTIONS_FIELDS: + raise RuntimeError( + f"{f.func}: unrecognized tensor options field '{a.name}' in python binding arguments" + ) + if str(a.type) != TENSOR_OPTIONS_FIELDS.get(a.name): + raise RuntimeError( + f"{f.func}: unrecognized type '{str(a.type)}' for tensor options field '{a.name}'" + ) + if not all( + a in tensor_options_args_names for a in TENSOR_OPTIONS_FIELDS.keys() + ): + raise RuntimeError( + f"{f.func}: incomplete tensor options args: {tensor_options_args_names}" + ) + + inits.append( + f"""\ +const auto options = TensorOptions() + .dtype({arg_parser_outputs['dtype'].expr}) + .device({arg_parser_outputs['device'].expr}) + .layout({arg_parser_outputs['layout'].expr}) + .requires_grad({arg_parser_outputs['requires_grad'].expr}) + .pinned_memory({arg_parser_outputs['pin_memory'].expr}); +torch::utils::maybe_initialize_cuda(options); +""" + ) + lambda_args_exprs["options"] = "options" + + # 3. special case - access scattered TensorOptions fields without packing + # TODO: maybe move to the generator side as it's not related to binding. + if not has_toptions and tensor_options_args_names: + if "dtype" in tensor_options_args_names: + # we're an output-arg variant, check these args against output tensor + if not f.func.is_out_fn(): + raise RuntimeError( + f"{f.func}: dtype in tensor_options_args without output arg" + ) + if not all(a in tensor_options_args_names for a in ("layout", "device")): + raise RuntimeError( + f"{f.func}: incomplete tensor options for output check" + ) + + inits.append( + f"""\ +check_out_type_matches({arg_parser_outputs['out'].expr}, {arg_parser_outputs['dtype'].expr}, + {arg_parser_outputs['dtype'].is_none_expr}, {arg_parser_outputs['layout'].expr}, + {arg_parser_outputs['device'].expr}, {arg_parser_outputs['device'].is_none_expr}); +""" + ) + # we'll set requires_grad on outgoing tensor + if "requires_grad" not in tensor_options_args_names: + raise RuntimeError( + f'{f.func}: expected "requires_grad" in tensor_options_args absent, but found [{tensor_options_args_names}]' + ) + + return DispatchLambdaArgumentExprs( + exprs=tuple(lambda_args_exprs[a.name] for a in lambda_args), + inits=inits, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/translate.py b/env-llmeval/lib/python3.10/site-packages/torchgen/api/translate.py new file mode 100644 index 0000000000000000000000000000000000000000..00f8558413450f0f37251ab803c14a41526f9507 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/api/translate.py @@ -0,0 +1,430 @@ +from typing import Dict, List, NoReturn, Sequence, Union + +from torchgen.api.types import ( + ArrayRefCType, + BaseCType, + Binding, + boolT, + ConstRefCType, + deviceT, + Expr, + intArrayRefT, + iOptTensorListRefT, + layoutT, + ListCType, + longT, + memoryFormatT, + MutRefCType, + NamedCType, + opmath_t, + OptionalCType, + optionalIntArrayRefT, + optionalScalarRefT, + optionalSymIntArrayRefT, + optionalTensorRefT, + scalar_t, + scalarT, + scalarTypeT, + SpecialArgName, + symIntArrayRefT, + SymIntT, + tensorOptionsT, + tensorT, + VectorCType, +) + +# This file implements a small program synthesis engine that implements +# conversions between one API to another. +# +# The key data type in this file in NamedCType, short for Named C++ semantic type. 
A NamedCType +# represents a C++ type, plus semantic information about what it represents. +# For example, consider the argument "bool pin_memory"; its normal C++ type is +# "bool", but its C++ semantic type also keeps track that this represents a +# "pin_memory"; you can't just use a random other boolean in a context where you +# need a "pin_memory"! +# +# The translator takes a list of needed NamedCTypes, and then figures out how +# to construct expressions with these NamedCTypes from the given bindings. Many +# of these expressions are trivial (I need a Tensor other; there's a Tensor +# other scope); others are more nontrivial and may require packing/unpacking. +# Some examples of non-trivial action: +# +# - Need the "dtype" binding? Well, maybe "dtype" isn't available +# in the context, instead, "options" is, and you need to extract +# it from there. (Gather) +# +# - Need the "context" binding? Well, maybe "context" isn't available +# in the context, and you need to construct it from "dtype", "device", +# etc. (Scatter) +# +# - Need the "memory_format" binding? Well, actually, it's available +# from both "memory_format" and "options", so you had better make sure +# they are consistent. (Join) + +options_ctype = NamedCType("options", ConstRefCType(BaseCType(tensorOptionsT))) + +out_tensor_ctype = NamedCType("out", ConstRefCType(BaseCType(tensorT))) + +longVec_ctype = VectorCType(BaseCType(longT)) +longSymVec_ctype = VectorCType(BaseCType(SymIntT)) +optionalLongVec_ctype = OptionalCType(VectorCType(BaseCType(longT))) +optionalScalar_ctype = OptionalCType(BaseCType(scalarT)) +optionalTensor_ctype = OptionalCType(BaseCType(tensorT)) + + +class UnsatError(RuntimeError): + pass + + +# Given a set of in-scope bindings and a set of target bindings, synthesize +# a list of expressions that uses only the in-scope bindings (bindings) that +# have all of the types of goals. You may want to use this function if +# you're generating code for a function like: +# +# void f({args}) { +# g({exprs}); // g is a different API +# } +# +# and you need to generate "exprs". +# +# Typically, a list of Bindings is convenient to get (you usually call something +# like arguments() to get them); but technically you only need less information: +# for 'bindings' an (un-ordered) list of Exprs is sufficient; similarly, for +# 'goals', an (ordered) list of NamedCType goals is sufficient. If you are doing +# something more complicated, e.g., tracking the set of bindings in a context, +# you may find using these smaller types more convenient. +def translate( + bindings: Sequence[Union[Expr, Binding]], + goals: Sequence[Union[NamedCType, Binding]], + *, + method: bool = False, + allow_expensive_conversions: bool = False, +) -> List[Expr]: + binding_exprs: List[Expr] = [] + for b in bindings: + if isinstance(b, Binding): + binding_exprs.append( + Expr( + expr=b.name, + type=b.nctype, + ) + ) + else: + binding_exprs.append(b) + + goal_ctypes: List[NamedCType] = [] + for g in goals: + if isinstance(g, Binding): + goal_ctypes.append(g.nctype) + else: + goal_ctypes.append(g) + + # Add all the bindings to the context + ctx: Dict[NamedCType, str] = {} + for b in binding_exprs: + ctx[b.type] = b.expr + + # While we're at it, do some simple forward inference, looking through + # constructors. + # + # NB: When should you do forward inference versus backward inference? 
+ # The general idea: + # + # - Backward inference WHEN the goal gets smaller + # - Forward inference WHEN the hypothesis gets smaller + # + # This helps ensure termination: backward inference starts with a goal + # and tries to make it simpler and simpler until it's trivial; if the + # goal can grow in size, we blow up to a really huge goal size. + # Similarly, with forward inference we take hypotheses and decompose + # them into simpler hypotheses; if hypotheses could expand in size, + # we also have potential nontermination. (In the code below, forward + # inference is only ever carried out at a single step, but you could + # imagine repeated application of forward inference being profitable.) + # + # A good starting point in the literature for exploring more about proof + # search are these lecture notes + # https://www.cs.cmu.edu/~fp/courses/oregon-m10/04-focusing.pdf + # + # TODO: My kingdom for a pattern matcher + # https://www.python.org/dev/peps/pep-0634/ + # + # TODO: This could get us in recomputation trouble if b.expr is nontrivial. + # Fix this by implementing some sort of sharing so that if multiple + # goals share the same expression, we only compute it once. This seems + # to matter in practice as compiler is often unwilling to CSE nontrivial + # expressions like scalar.to() + t = b.type + if ( + isinstance(t, ConstRefCType) + and isinstance(t.elem, OptionalCType) + and isinstance(t.elem.elem, BaseCType) + and str(t.elem.elem.type) == "at::Tensor" + ): + ctx[ + NamedCType(t.elem.elem.name, ConstRefCType(BaseCType(tensorT))) + ] = f"({b.expr}.has_value() ? *{b.expr} : at::Tensor())" + + if t.type == ConstRefCType(OptionalCType(BaseCType(tensorT))): + ctx[ + NamedCType(t.name, BaseCType(optionalTensorRefT)) + ] = f"(({b.expr}.has_value() && (*{b.expr}).defined()) ? at::OptionalTensorRef(*{b.expr}) : at::OptionalTensorRef())" + + if t.type == ConstRefCType(BaseCType(scalarT)): + ctx[NamedCType(t.name, BaseCType(opmath_t))] = f"({b.expr}).to()" + + if t.type == ConstRefCType(OptionalCType(BaseCType(scalarT))): + ctx[ + NamedCType(t.name, BaseCType(optionalScalarRefT)) + ] = f"({b.expr}.has_value() ? at::OptionalScalarRef(&({b.expr}.value())) : at::OptionalScalarRef())" + + if t.type == BaseCType(scalar_t): + ctx[ + NamedCType(t.name, BaseCType(opmath_t)) + ] = f"static_cast({b.expr})" + + # [Note: IOptTensorListRef] + if t.type == ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT)))): + ctx[ + NamedCType(t.name, BaseCType(iOptTensorListRefT)) + ] = f"at::IOptTensorListRef({b.expr})" + + # Add implicit bindings if the generated code is inside a Tensor method + if method: + ctx[ + NamedCType("self", MutRefCType(BaseCType(tensorT))) + ] = "const_cast(*this)" + ctx[ + NamedCType("self", ConstRefCType(BaseCType(tensorT))) + ] = "const_cast(*this)" + # This is better! Byte-for-byte compat + # ctx[NamedCType("self", ConstRefCType(BaseCType(tensorT)))] = "*this" + + def unsat(goal: NamedCType) -> NoReturn: + ctx_desc = "\n".join( + f" {t.cpp_type()} {t.name}; // {e}" for t, e in ctx.items() + ) + raise UnsatError( + f""" +Failed to synthesize the expression "{goal.cpp_type()} {goal.name}". +When I failed, the following bindings were available in the context: + +{ctx_desc} + +This probably means there is a missing rule in the rules of torchgen.api.translate. +Check this module for more information. +""" + ) + + # A shitty backtracking search implementation. It's shitty because it + # does backtracking via stack (bad idea!) and for the most part tries to + # avoid backtracking. 
In particular, if + # direct=True, we won't try to do any fancy synthesis, just trivial + # conversions (e.g., "T a" is OK for "const T& a"). So all of the + # existing rules in this function simply try to solve immediately, + # and bail if things don't work out. + def solve(goal: NamedCType, *, direct: bool) -> str: + def direct_solve(goal: NamedCType) -> str: + return solve(goal, direct=True) + + if goal in ctx: + # Trivial + return ctx[goal] + + # const & is satisfied with mutable & + if isinstance(goal.type, ConstRefCType): + try: + # WARNING: not strictly decreasing; be careful not + # to add a direct conversion that goes satisfies + # mutable& with const& + return solve( + NamedCType(goal.name, MutRefCType(goal.type.elem)), direct=direct + ) + except UnsatError: + pass + + # mutable & is satisfied with value + if isinstance(goal.type, MutRefCType): + try: + return solve(NamedCType(goal.name, goal.type.elem), direct=direct) + except UnsatError: + pass + + # TODO: These are referentially equal, shouldn't have to do this; + # ensuring we don't use type synonym IntArrayRef in codegen would + # help + if goal.type == ArrayRefCType(BaseCType(longT)): + return solve(NamedCType(goal.name, BaseCType(intArrayRefT)), direct=direct) + + if direct: + unsat(goal) + + # For now, all of these rules are mutually exclusive. + if goal == NamedCType("memory_format", OptionalCType(BaseCType(memoryFormatT))): + memory_format = direct_solve( + NamedCType( + SpecialArgName.possibly_redundant_memory_format, + OptionalCType(BaseCType(memoryFormatT)), + ) + ) + # No need to join "memory_format" and "options" if the target API takes "options" directly. + # Otherwise it will cause the redundant memory_format error. + if options_ctype in goal_ctypes: + return memory_format + try: + options = direct_solve(options_ctype) + return f"c10::impl::check_tensor_options_and_extract_memory_format({options}, {memory_format})" + except UnsatError: + return memory_format + elif goal == NamedCType("options", BaseCType(tensorOptionsT)): + dtype = direct_solve( + NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))) + ) + pin_memory = direct_solve( + NamedCType("pin_memory", OptionalCType(BaseCType(boolT))) + ) + device = direct_solve( + NamedCType("device", OptionalCType(BaseCType(deviceT))) + ) + layout = direct_solve( + NamedCType("layout", OptionalCType(BaseCType(layoutT))) + ) + return f"TensorOptions().dtype({dtype}).layout({layout}).device({device}).pinned_memory({pin_memory})" + + elif goal == NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))): + try: + options = direct_solve(options_ctype) + return f"optTypeMetaToScalarType({options}.dtype_opt())" + except UnsatError: + out_tensor = direct_solve(out_tensor_ctype) + return f"{out_tensor}.scalar_type()" + + elif goal == NamedCType("layout", OptionalCType(BaseCType(layoutT))): + try: + options = direct_solve(options_ctype) + return f"{options}.layout_opt()" + except UnsatError: + out_tensor = direct_solve(out_tensor_ctype) + return f"{out_tensor}.layout()" + + elif goal == NamedCType("device", OptionalCType(BaseCType(deviceT))): + try: + options = direct_solve(options_ctype) + return f"{options}.device_opt()" + except UnsatError: + out_tensor = direct_solve(out_tensor_ctype) + return f"{out_tensor}.device()" + + elif goal == NamedCType("pin_memory", OptionalCType(BaseCType(boolT))): + try: + options = direct_solve(options_ctype) + return f"{options}.pinned_memory_opt()" + except UnsatError: + # If we're calling a factory op from its out= variant, + # We don't 
actually care about the value of pin_memory. + out_tensor = direct_solve(out_tensor_ctype) + return "c10::nullopt" + + # We can always do translations from value types to reference types, like vector -> IntArrayRef + elif goal.type == BaseCType(intArrayRefT): + try: + return direct_solve(NamedCType(goal.name, longVec_ctype)) + except UnsatError: + # We can also go SymIntArrayRef -> IntArrayRef + symIntArrayRef_type = direct_solve( + NamedCType(goal.name, BaseCType(symIntArrayRefT)) + ) + return f"C10_AS_INTARRAYREF_SLOW({symIntArrayRef_type})" + elif goal.type == BaseCType(symIntArrayRefT): + try: + r = direct_solve(NamedCType(goal.name, BaseCType(intArrayRefT))) + return f"c10::fromIntArrayRefSlow({r})" + except UnsatError: + return direct_solve(NamedCType(goal.name, longSymVec_ctype)) + elif goal.type == BaseCType(SymIntT): + return direct_solve(NamedCType(goal.name, BaseCType(longT))) + elif goal.type == OptionalCType(BaseCType(SymIntT)): + argname = direct_solve( + NamedCType(goal.name, OptionalCType(BaseCType(longT))) + ) + return f"{argname}.has_value() ? c10::make_optional(c10::SymInt(*{argname})) : c10::nullopt" + elif goal.type == BaseCType(longT): + symInt_type = direct_solve(NamedCType(goal.name, BaseCType(SymIntT))) + return f"{symInt_type}.guard_int(__FILE__, __LINE__)" + elif goal.type == OptionalCType(BaseCType(longT)): + argname = direct_solve( + NamedCType(goal.name, OptionalCType(BaseCType(SymIntT))) + ) + return f"{argname}.has_value() ? c10::make_optional({argname}->guard_int(__FILE__, __LINE__)) : c10::nullopt" + elif goal.type == BaseCType(optionalIntArrayRefT): + try: + return direct_solve(NamedCType(goal.name, optionalLongVec_ctype)) + except UnsatError: + argname = direct_solve( + NamedCType(goal.name, BaseCType(optionalSymIntArrayRefT)) + ) + return f"{argname}.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*{argname})) : c10::nullopt" + elif goal.type == BaseCType(optionalSymIntArrayRefT): + # TODO: You might also want to solve this from longSymVec_ctype or + # an optional version of it + argname = direct_solve( + NamedCType(goal.name, BaseCType(optionalIntArrayRefT)) + ) + return f"{argname}.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*{argname})) : c10::nullopt" + elif goal.type == BaseCType(optionalScalarRefT): + return direct_solve(NamedCType(goal.name, optionalScalar_ctype)) + elif goal.type == BaseCType(optionalTensorRefT): + return direct_solve(NamedCType(goal.name, optionalTensor_ctype)) + + # Note [translation from C++ reference to value types] + # The below cases are all for when we have an argument with a reference type, + # and a corresponding goal with a value type. + # These are needed when we populate the inputs to a lambda capture and we need + # to guarantee the lifetime of each captured argument. + # We guard it with an explicit kwarg because converting to a value type is expensive + # (O(n)) to convert from IntArrayRef to vector), + # so the caller of translate() should be explicit that they need it. 
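+        # Illustrative example (not in the original source): with
+        # allow_expensive_conversions=True, a goal of
+        # "::std::vector<int64_t> size" can be satisfied from an in-scope
+        # "at::IntArrayRef size" binding by emitting "size.vec()", as the
+        # first case below does.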
+ if allow_expensive_conversions: + if goal.type == VectorCType(BaseCType(longT)): + intArrayRef_ctype = NamedCType(goal.name, BaseCType(intArrayRefT)) + argname = direct_solve(intArrayRef_ctype) + return f"{argname}.vec()" + if goal.type == VectorCType(BaseCType(SymIntT)): + symIntArrayRef_ctype = NamedCType(goal.name, BaseCType(symIntArrayRefT)) + argname = direct_solve(symIntArrayRef_ctype) + return f"{argname}.vec()" + elif goal.type == OptionalCType(VectorCType(BaseCType(longT))): + optionalIntArrayRef_ctype = NamedCType( + goal.name, BaseCType(optionalIntArrayRefT) + ) + argname = direct_solve(optionalIntArrayRef_ctype) + return f"{argname}.has_value() ? c10::make_optional({argname}->vec()) : c10::nullopt" + elif goal.type == OptionalCType(BaseCType(scalarT)): + optionalScalarRef_ctype = NamedCType( + goal.name, BaseCType(optionalScalarRefT) + ) + argname = direct_solve(optionalScalarRef_ctype) + return f"{argname}.has_value() ? c10::make_optional({argname}) : c10::nullopt" + elif goal.type == OptionalCType(BaseCType(scalarT)): + optionalTensorRef_ctype = NamedCType( + goal.name, BaseCType(optionalTensorRefT) + ) + argname = direct_solve(optionalTensorRef_ctype) + return f"{argname}.has_value() ? c10::make_optional({argname}) : c10::nullopt" + # Technically, we also need to handle cases of C++ containers holding reference types. + # But there currently aren't any ops that require lambda capture codegen + # With arguments like std::vector. + # If that changes, we'll have to add the translation here. + + # We allow const casting on tensors, since const-correctness is a bit broken for at::Tensor. + # We could probably generalize this to non-tensor types too. + if goal.type == MutRefCType(BaseCType(tensorT)): + const_ref_tensor_ctype = NamedCType( + goal.name, ConstRefCType(BaseCType(tensorT)) + ) + argname = direct_solve(const_ref_tensor_ctype) + return f"const_cast({argname})" + + unsat(goal) + + return [Expr(solve(g, direct=False), g) for g in goal_ctypes] diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/ufunc.py b/env-llmeval/lib/python3.10/site-packages/torchgen/api/ufunc.py new file mode 100644 index 0000000000000000000000000000000000000000..7f044706068cf9af126070d8fa39cdca7da83b8b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/api/ufunc.py @@ -0,0 +1,209 @@ +from dataclasses import dataclass +from typing import List, Optional + +import torchgen.api.types as api_types + +from torchgen.api import cpp, structured +from torchgen.api.types import ( + ArgName, + BaseCppType, + BaseCType, + Binding, + ConstRefCType, + CType, + NamedCType, + scalarT, +) +from torchgen.model import ( + Argument, + BaseTy, + BaseType, + DispatchKey, + FunctionSchema, + NativeFunctionsGroup, + Type, +) + + +def schema_kernel_name(func: FunctionSchema, dispatch_key: DispatchKey) -> str: + assert func.is_out_fn(), "ufunc.kernel_name should only be invoked on out schemas" + return f"ufunc_{func.name.name}_{dispatch_key}" + + +def kernel_name(g: NativeFunctionsGroup, dispatch_key: DispatchKey) -> str: + return schema_kernel_name(g.out.func, dispatch_key) + + +# Tensors are omitted (as they are stored in TensorIterator), everything else is +# passed along (technically, we can pass tensors along too, it just wastes +# argument registers) +# +# NB: used for CPU only +def dispatchstub_type(t: Type, *, binds: ArgName) -> Optional[NamedCType]: + # Dispatch stubs are always plain ints + r = cpp.valuetype_type(t, binds=binds, symint=False) + if r is not None: + return r + + if t 
== BaseType(BaseTy.Scalar): + return NamedCType(binds, ConstRefCType(BaseCType(scalarT))) + elif t == BaseType(BaseTy.Tensor): + return None + else: + raise AssertionError(f"unrecognized type {repr(t)}") + + +def opmath_type(scalar_t: BaseCppType) -> BaseCppType: + if scalar_t == api_types.scalar_t: + return api_types.opmath_t + raise NotImplementedError + + +# NB: Tensors in constructor are stored in opmath_t, not scalar_t +# because Tensor in constructor = its a scalar tensor partially applied = +# it can be higher precision and we want to compute in that higher precision +# +# NB: CUDA only +def ufunctor_ctor_type(t: Type, *, binds: ArgName, scalar_t: BaseCppType) -> NamedCType: + r = cpp.valuetype_type(t, binds=binds, symint=False) + if r is not None: + return r + + if t == BaseType(BaseTy.Scalar): + return NamedCType(binds, BaseCType(opmath_type(scalar_t))) + elif t == BaseType(BaseTy.Tensor): + return NamedCType(binds, BaseCType(opmath_type(scalar_t))) + else: + raise AssertionError(f"unrecognized type {repr(t)}") + + +# Only Tensors ever get passed directly to operator() +# +# NB: CUDA only +# (Actually, this works for CPU too) +def ufunctor_apply_type( + t: Type, *, binds: ArgName, scalar_t: BaseCppType +) -> NamedCType: + if t == BaseType(BaseTy.Tensor): + return NamedCType(binds, BaseCType(scalar_t)) + else: + raise AssertionError(f"unrecognized type {repr(t)}") + + +# The actual ufunc template function the user writes. Everything here +# is done in the computation type. compute_t is opmath_t in CUDA and scalar_t +# in CPU +def ufunc_type(t: Type, *, binds: ArgName, compute_t: CType) -> NamedCType: + r = cpp.valuetype_type(t, binds=binds, symint=False) + if r is not None: + return r + + if t == BaseType(BaseTy.Scalar): + return NamedCType(binds, compute_t) + elif t == BaseType(BaseTy.Tensor): + return NamedCType(binds, compute_t) + else: + raise AssertionError(f"unrecognized type {repr(t)}") + + +def ufunctor_ctor_argument(a: Argument, scalar_t: BaseCppType) -> Binding: + return Binding( + nctype=ufunctor_ctor_type(a.type, binds=a.name, scalar_t=scalar_t), + name=a.name, + default=None, + argument=a, + ) + + +def ufunctor_apply_argument(a: Argument, scalar_t: BaseCppType) -> Binding: + return Binding( + nctype=ufunctor_apply_type(a.type, binds=a.name, scalar_t=scalar_t), + name=a.name, + default=None, + argument=a, + ) + + +def ufunc_argument(a: Argument, compute_t: CType) -> Binding: + return Binding( + nctype=ufunc_type(a.type, binds=a.name, compute_t=compute_t), + name=a.name, + default=None, + argument=a, + ) + + +@dataclass(frozen=True) +class UfunctorBindings: + ctor: List[Binding] + apply: List[Binding] + + +# ufunctors are a CUDA-only concept representing functors that take some of +# their arguments on a host-side constructor, and the rest in the device-side +# apply. 
E.g., +# +# template +# struct CUDAFunctorOnSelf_add { +# using opmath_t = at::opmath_type; +# opmath_t other_; +# opmath_t alpha_; +# CUDAFunctorOnSelf_add(opmath_t other, opmath_t alpha) : other_(other), alpha_(alpha) {} +# __device__ scalar_t operator()(scalar_t self) { +# return ufunc::add(static_cast(self), other_, alpha_); +# } +# }; +# +# The ctor refers to the constructor CUDAFunctorOnSelf_add, while apply refers +# to the operator() definition +def ufunctor_arguments( + g: NativeFunctionsGroup, *, scalar_tensor_idx: Optional[int], scalar_t: BaseCppType +) -> UfunctorBindings: + ctor = [] + apply = [] + for a in g.functional.func.arguments.flat_non_out: + if a.type.is_tensor_like(): + if scalar_tensor_idx == 0: + # put it in the ctor anyway + ctor.append(ufunctor_ctor_argument(a, scalar_t=scalar_t)) + scalar_tensor_idx = None + else: + if scalar_tensor_idx is not None: + scalar_tensor_idx -= 1 + apply.append(ufunctor_apply_argument(a, scalar_t=scalar_t)) + else: + ctor.append(ufunctor_ctor_argument(a, scalar_t=scalar_t)) + assert scalar_tensor_idx is None + return UfunctorBindings(ctor=ctor, apply=apply) + + +# ufuncs are the inner loop template functions that you wrote in ufunc/add.h +# which do the actual computation in question. E.g., +# +# template +# C10_HOST_DEVICE T add(T self, T other, T alpha) __ubsan_ignore_undefined__ { +# return self + alpha * other; +# } +# +# In this file, we refer to T as compute_t which is bound by caller +def ufunc_arguments(g: NativeFunctionsGroup, *, compute_t: CType) -> List[Binding]: + return [ + ufunc_argument(a, compute_t=compute_t) + for a in g.functional.func.arguments.flat_non_out + ] + + +# Stubs are the DispatchStub trampolines that CPU kernels use to get to their +# vectorized versions. E.g., +# +# using structured_binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha); +# DECLARE_DISPATCH(structured_binary_fn_alpha, add_stub); +def stub_arguments(g: NativeFunctionsGroup) -> List[Binding]: + # stubs drop all tensor arguments (they are implicit in the TensorIterator + # argument and keep everything else) + return [ + r + for a in g.out.func.arguments.flat_non_out + if not a.type.is_tensor_like() + for r in structured.argument(a) + ] diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/api/unboxing.py b/env-llmeval/lib/python3.10/site-packages/torchgen/api/unboxing.py new file mode 100644 index 0000000000000000000000000000000000000000..df4430c49b745753dc83b2115a7f4d8c000190d0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/api/unboxing.py @@ -0,0 +1,248 @@ +from typing import List, Tuple + +from torchgen.api import cpp +from torchgen.api.types import Binding, CppSignatureGroup, CType +from torchgen.model import ( + Argument, + BaseTy, + BaseType, + ListType, + NativeFunction, + OptionalType, + Type, +) + +# This file generates the code for unboxing wrappers, i.e., the glue logic to unbox a boxed operator and convert the +# ivalues from stack to correct arguments to the unboxed kernel, based on corresponding JIT schema. This codegen is +# an alternative way to generate unboxing wrappers similar to the existing C++ metaprogramming approach but gets the +# job done statically. These generated unboxing wrappers will be useful under the scenario where we need to register +# a fixed set of operators known at compile time and thus can save some time in runtime initialization phase. 
+# +# Here's an example on how the codegen works: +# +# - Function Schema (source of truth) +# +# aten::empty.names(int[] size, *, Dimname[]? names, +# ScalarType? dtype=None, Layout? layout=None, +# Device? device=None, bool? pin_memory=None, +# MemoryFormat? memory_format=None) -> Tensor +# - Argument Conversion +# Generates C++ code to convert an ivalue (from stack) to its underlying C++ type. +# - int[] size +# ```cpp +# const c10::List size_list_in = (std::move(peek(stack, 0, 7))).toList(); +# +# std::vector size_vec; +# for (c10::IValue size_elem: size_list_in) { +# int64_t size_base = size_elem.to(); +# size_vec.push_back(size_base); +# } +# at::ArrayRef size_list_out(size_vec); +# ~~~~~~~~~~~~~ <-- The converted argument from ivalues in the stack. +# Will be passed to unboxed kernel. +# ``` +# - Dimname[]? names +# ```cpp +# c10::optional names_opt = (std::move(peek(stack, 1, 7))).toOptional(); +# c10::optional> names_opt_out; +# if (names_opt.has_value()) { +# ~~~~~~~~~~~ <-- Unwrapping optional shell +# const c10::IValue names_opt_in = names_opt.value(); +# const c10::List names_list_in = names_opt_in.toList(); +# +# std::vector names_vec; +# for (c10::IValue names_elem: names_list_in) { +# ~~~~~~~~~~~~~~~~~~~~~~~~~ <-- Unrolling list, then convert elements one by one. +# at::Dimname names_base = names_elem.to(); +# names_vec.push_back(names_base); +# } +# at::ArrayRef names_list_out(names_vec); +# +# names_opt_out = c10::optional>(names_list_out); +# } else { +# names_opt_out = c10::optional>(); +# } +# ``` +# - ScalarType? dtype (similarly for the rest of the arguments) +# ```cpp +# c10::optional dtype_opt = (std::move(peek(stack, 2, 7))).toOptional(); +# c10::optional dtype_opt_out; +# if (dtype_opt.has_value()) { +# const c10::IValue dtype_opt_in = dtype_opt.value(); +# at::ScalarType dtype_base = dtype_opt_in.to(); +# ~~~~~~~~~~~~~~~~~~~~ <-- For base types, convert ivalue to it +# directly using ".to()" API. 
+# dtype_opt_out = c10::optional(dtype_base); +# } else { +# dtype_opt_out = c10::optional(); +# } +# ``` +# +# - Unboxed Kernel Call +# ```cpp +# auto result_ = torch::empty( +# size_list_out, +# names_opt_out, +# options, +# memory_format_opt_out +# ); +# ``` +# +# - Push Result Back to Stack +# ```cpp +# drop(stack, 7); +# pack(stack, std::move(result_)); +# ``` +connector = "\n\t" + + +# Return unboxing function name for a NativeFunction +def name(f: NativeFunction) -> str: + return f.func.name.unambiguous_name() + + +# Convert all the arguments in a NativeFunction to C++ code +def convert_arguments(f: NativeFunction) -> Tuple[List[Binding], List[str]]: + # we need the 'self' argument so method needs to be False + args = ( + CppSignatureGroup.from_native_function(f, method=False) + .most_faithful_signature() + .arguments() + ) + code_list = [ + f"c10::IValue {args[i].name} = std::move(peek(stack, {i}, {len(args)}));" + for i in range(len(args)) + ] + [""] + binding_list = [] + for arg in args: + # expecting only Argument + if not isinstance(arg.argument, Argument): + raise Exception( + f"Unexpected argument type, expecting `Argument` but got {arg}" + ) + argument: Argument = arg.argument + unboxed_name, _, code, decl = argumenttype_ivalue_convert( + argument.type, + argument.name, + mutable=argument.is_write, + ) + code_list.extend(decl) + code_list.extend(code) + binding_list.append(arg.with_name(unboxed_name)) + return binding_list, code_list + + +# Takes in the type, name and mutability corresponding to an argument, and generates a tuple of: +# (1) the C++ code necessary to unbox the argument +# (2) A Binding corresponding to the newly created unboxed variable, including variable name and its CType +def argumenttype_ivalue_convert( + t: Type, arg_name: str, *, mutable: bool = False +) -> Tuple[str, CType, List[str], List[str]]: + # Unboxing is for mobile, which doesn't care about SymInts + ctype = cpp.argumenttype_type( + t=t, mutable=mutable, binds=arg_name, symint=False + ).type + + if isinstance(t, BaseType): + out_name = f"{arg_name}_base" + code, decl = _gen_code_base_type( + arg_name=arg_name, out_name=out_name, ctype=ctype + ) + elif isinstance(t, OptionalType): + out_name = f"{arg_name}_opt_out" + code, decl = _gen_code_optional_type( + arg_name=arg_name, + out_name=out_name, + t=t, + ctype=ctype, + ) + elif isinstance(t, ListType): + out_name = f"{arg_name}_list_out" + code, decl = _gen_code_list_type( + arg_name=arg_name, + out_name=out_name, + t=t, + ctype=ctype, + ) + else: + raise Exception(f"Cannot handle type {t}. 
arg_name: {arg_name}") + return out_name, ctype, code, decl + + +def _gen_code_base_type( + arg_name: str, out_name: str, ctype: CType +) -> Tuple[List[str], List[str]]: + return [ + f"{ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.to<{ctype.cpp_type(strip_ref=True)}>();" + ], [] + + +def _gen_code_optional_type( + arg_name: str, out_name: str, t: OptionalType, ctype: CType +) -> Tuple[List[str], List[str]]: + in_name = f"{arg_name}_opt_in" + res_name, _, res_code, decl = argumenttype_ivalue_convert(t.elem, in_name) + return ( + f""" +c10::optional {arg_name}_opt = {arg_name}.toOptional(); +{ctype.cpp_type(strip_ref=True)} {out_name}; +if ({arg_name}_opt.has_value()) {{ + const c10::IValue {in_name} = {arg_name}_opt.value(); + {connector.join(res_code)} + {out_name} = {ctype.cpp_type(strip_ref=True)}({res_name}); +}} else {{ + {out_name} = {ctype.cpp_type(strip_ref=True)}(); +}} + """.split( + "\n" + ), + decl, + ) + + +def _gen_code_list_type( + arg_name: str, out_name: str, t: ListType, ctype: CType +) -> Tuple[List[str], List[str]]: + in_name = f"{arg_name}_list_in" + elem_name = f"{arg_name}_elem" + code = [f"const c10::List {in_name} = {arg_name}.toList();"] + res_name, res_ctype, res_code, decl = argumenttype_ivalue_convert(t.elem, elem_name) + # handle list type with size, e.g., bool[4] + if isinstance(t.elem, BaseType) and t.elem.name == BaseTy.bool and t.size: + code.extend( + f""" +{ctype.cpp_type(strip_ref=True)} {out_name} = as_array<{res_ctype.cpp_type(strip_ref=True)}, {t.size}>({in_name}); + """.split( + "\n" + ) + ) + # we have to use c10::List for optional element. e.g., Tensor?[] -> c10::List> + elif isinstance(t.elem, OptionalType): + code.extend( + f""" +{ctype.cpp_type(strip_ref=True)} {out_name}; +for (c10::IValue {elem_name}: {in_name}) {{ + {connector.join(res_code)} + {out_name}.push_back({res_name}); +}} + """.split( + "\n" + ) + ) + else: + # use ArrayRef as default. + vec_name = arg_name + "_vec" + # need to bring vector instantiation out of scope so that ArrayRef has valid data + decl.append(f"std::vector<{res_ctype.cpp_type(strip_ref=True)}> {vec_name};") + code.extend( + f""" +for (c10::IValue {elem_name}: {in_name}) {{ + {connector.join(res_code)} + {vec_name}.push_back({res_name}); +}} +{ctype.cpp_type(strip_ref=True)} {out_name}({vec_name}); + """.split( + "\n" + ) + ) + return code, decl diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyNativeFunctions.h b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyNativeFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..b45a17b5922f8a0b76e0237616914ce9969efca5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyNativeFunctions.h @@ -0,0 +1,19 @@ +#pragma once + +// an external backend might generate file within its code tree +// and check all the source files within the tree with clang-format. +// so, disable it since the backend might have a different config. 
+// clang-format off + +// ${generated_comment} + +#include + +${namespace_prologue} + +struct ${class_name} { + +${dispatch_declarations} + +}; +${namespace_epilogue} diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/LazyIr.h b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/LazyIr.h new file mode 100644 index 0000000000000000000000000000000000000000..1ee90e66cc6cedc616baa725c2fd562a7fcfdda2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/LazyIr.h @@ -0,0 +1,19 @@ +#pragma once + +// This file contains autogenerated LazyTensor IR nodes +${lazy_ir_sysinc} +${lazy_ir_inc} + +${namespace_prologue} +using at::operator<<; + +// kNullValue is used to contribute a static hash value any time +// a node has an Optional input that is nullopt. It is important +// to differentiate between HASH(nullopt, something) and HASH(something, nullopt), +// and using kNullValue in the hash function in the order of arguments +// serves this purpose. +static const torch::lazy::Value kNullValue = torch::lazy::Value(); + +${ir_declarations} + +${namespace_epilogue} diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/MethodOperators.h b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/MethodOperators.h new file mode 100644 index 0000000000000000000000000000000000000000..0e192cd05ef3c78fa74848c93de32150c1e3fd8b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/MethodOperators.h @@ -0,0 +1,24 @@ +#pragma once + +// ${generated_comment} + +#ifdef TORCH_ASSERT_NO_OPERATORS +#error This change adds a dependency on native_functions.yaml, \ + meaning the file will need to be re-compiled every time an operator \ + is changed or added. Consider if your change would be better placed in \ + another file, or if a more specific header might achieve the same goal. \ + See NOTE: [Tensor vs. TensorBase] +#endif + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +${MethodOperators_includes} + +namespace at { +namespace _ops { +${MethodOperators_declarations} +} // namespace _ops +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeFunction.h b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeFunction.h new file mode 100644 index 0000000000000000000000000000000000000000..4f70db62a4c6429ee8e782fb13fb0ae6ffc5d957 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeFunction.h @@ -0,0 +1,17 @@ +#pragma once + +// ${generated_comment} + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +${extra_includes} + +${native_function_declarations} diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterDispatchKey.cpp b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterDispatchKey.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7a1584d505f5a3c42861fde0ea5ee4da67485a32 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterDispatchKey.cpp @@ -0,0 +1,54 @@ +// required for old g++ to compile PRId64 macros, see +// https://github.com/pytorch/pytorch/issues/3571 +// for context +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +// an external backend might generate file within its code tree +// and check all the source files within the tree with clang-format. +// so, disable it since the backend might have a different config. +// clang-format off + +// NOTE: This condition is true for all PyTorch internal libraries, it +// just excludes external projects such as torch_xla which +// re-use some of the PyTorch codegen machinery. 
+#if defined(CAFFE2_BUILD_MAIN_LIB) || \ + defined(TORCH_CUDA_BUILD_MAIN_LIB) || \ + defined(TORCH_HIP_BUILD_MAIN_LIB) || \ + defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \ + defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB) +#define TORCH_ASSERT_ONLY_METHOD_OPERATORS +#endif + +// ${generated_comment} + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +$extra_cuda_headers +$external_backend_headers +$dispatch_headers +$ops_headers + +// See template file RegisterDispatchDefinitions.ini +$dispatch_definitions diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/BUILD.bazel b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/BUILD.bazel new file mode 100644 index 0000000000000000000000000000000000000000..d1a0db360d230fe0f027c19869c6307f17010503 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/BUILD.bazel @@ -0,0 +1,4 @@ +load("//:tools/bazel.bzl", "rules") +load(":build.bzl", "define_targets") + +define_targets(rules = rules) diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55eef3dc50b219cf792b26c26fb0c78d54a417eb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/context.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/context.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7c8aae488b6699ec574579c234b5cae8cf258f8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/context.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_annotated_fn_args.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_annotated_fn_args.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a951e8354527ba88322e819be02c8c0b7a91285 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_annotated_fn_args.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c186a62dd89c9da9ac4abfc88ccaf806c3b9f1b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd_functions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd_functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..867f48c7302247c43e57f3168cf60bed72a445e7 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd_functions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_inplace_or_view_type.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_inplace_or_view_type.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..442440148724658856d90a6b63db0f78d1a9c851 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_inplace_or_view_type.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_python_functions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_python_functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7485f17c05a62bdccf5cb43e9b94faca63c4496f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_python_functions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_trace_type.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_trace_type.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96f10ff0abf2648eb4d36e152e2c232ee38fe6e2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_trace_type.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_factories.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_factories.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52b401bd99c6be194e3fc78119951c767cf7e994 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_factories.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_type.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_type.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b552bde45c6244dabd71a4197061904032867fc4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_type.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/load_derivatives.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/load_derivatives.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d19b7617a0ec5c6ce2c24b4b9a5a215c0ba42542 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/load_derivatives.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/build.bzl b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/build.bzl new file mode 100644 index 0000000000000000000000000000000000000000..588bd5944e29477119782591b231fd80a7a57cf4 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/build.bzl @@ -0,0 +1,14 @@ +def define_targets(rules): + rules.py_library( + name = "autograd", + srcs = rules.glob(["*.py"]), + data = rules.glob([ + "*.yaml", + "templates/*", + ]), + visibility = ["//:__subpackages__"], + deps = [ + rules.requirement("PyYAML"), + "//torchgen", + ], + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/context.py b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/context.py new file mode 100644 index 0000000000000000000000000000000000000000..d838aa3c77bbbc0f37cd7fa6e005d85c9e9dd624 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/context.py @@ -0,0 +1,31 @@ +import functools +from typing import Callable + +from torchgen.api.autograd import NativeFunctionWithDifferentiabilityInfo as NFWDI +from torchgen.context import native_function_manager +from torchgen.utils import T + + +# Like tools.api.context.with_native_function, but for +# NativeFunctionWithDifferentiabilityInfo. +def with_native_function_with_differentiability_info( + func: Callable[[NFWDI], T] +) -> Callable[[NFWDI], T]: + @functools.wraps(func) + def wrapper(f: NFWDI) -> T: + with native_function_manager(f.func): + return func(f) + + return wrapper + + +# Like the above but with an additional dispatch key string argument +def with_native_function_with_differentiability_info_and_key( + func: Callable[[NFWDI, str], T] +) -> Callable[[NFWDI, str], T]: + @functools.wraps(func) + def wrapper(f: NFWDI, key: str) -> T: + with native_function_manager(f.func): + return func(f, key) + + return wrapper diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_annotated_fn_args.py b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_annotated_fn_args.py new file mode 100644 index 0000000000000000000000000000000000000000..f935a9adf4c6f126fe45999613ed9871280aac61 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_annotated_fn_args.py @@ -0,0 +1,129 @@ +""" +For procedural tests needed for __torch_function__, we use this function +to export method names and signatures as needed by the tests in +test/test_overrides.py. + +python -m tools.autograd.gen_annotated_fn_args \ + aten/src/ATen/native/native_functions.yaml \ + aten/src/ATen/native/tags.yaml \ + $OUTPUT_DIR \ + tools/autograd + +Where $OUTPUT_DIR is where you would like the files to be +generated. 
In the full build system, OUTPUT_DIR is +torch/testing/_internal/generated +""" + +import argparse +import os +import textwrap +from collections import defaultdict + +from typing import Any, Dict, List, Sequence + +import torchgen.api.python as python +from torchgen.context import with_native_function + +from torchgen.gen import parse_native_yaml +from torchgen.model import Argument, BaseOperatorName, NativeFunction +from torchgen.utils import FileManager + +from .gen_python_functions import ( + is_py_fft_function, + is_py_linalg_function, + is_py_nn_function, + is_py_special_function, + is_py_torch_function, + is_py_variable_method, + should_generate_py_binding, +) + + +def gen_annotated( + native_yaml_path: str, tags_yaml_path: str, out: str, autograd_dir: str +) -> None: + native_functions = parse_native_yaml( + native_yaml_path, tags_yaml_path + ).native_functions + mappings = ( + (is_py_torch_function, "torch._C._VariableFunctions"), + (is_py_nn_function, "torch._C._nn"), + (is_py_linalg_function, "torch._C._linalg"), + (is_py_special_function, "torch._C._special"), + (is_py_fft_function, "torch._C._fft"), + (is_py_variable_method, "torch.Tensor"), + ) + annotated_args: List[str] = [] + for pred, namespace in mappings: + groups: Dict[BaseOperatorName, List[NativeFunction]] = defaultdict(list) + for f in native_functions: + if not should_generate_py_binding(f) or not pred(f): + continue + groups[f.func.name.name].append(f) + for group in groups.values(): + for f in group: + annotated_args.append(f"{namespace}.{gen_annotated_args(f)}") + + template_path = os.path.join(autograd_dir, "templates") + fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False) + fm.write_with_template( + "annotated_fn_args.py", + "annotated_fn_args.py.in", + lambda: { + "annotated_args": textwrap.indent("\n".join(annotated_args), " "), + }, + ) + + +@with_native_function +def gen_annotated_args(f: NativeFunction) -> str: + def _get_kwargs_func_exclusion_list() -> List[str]: + # functions that currently don't work with kwargs in test_overrides.py + return [ + "diagonal", + "round_", + "round", + "scatter_", + ] + + def _add_out_arg( + out_args: List[Dict[str, Any]], args: Sequence[Argument], *, is_kwarg_only: bool + ) -> None: + for arg in args: + if arg.default is not None: + continue + out_arg: Dict[str, Any] = {} + out_arg["is_kwarg_only"] = str(is_kwarg_only) + out_arg["name"] = arg.name + out_arg["simple_type"] = python.argument_type_str( + arg.type, simple_type=True + ) + size_t = python.argument_type_size(arg.type) + if size_t: + out_arg["size"] = size_t + out_args.append(out_arg) + + out_args: List[Dict[str, Any]] = [] + _add_out_arg(out_args, f.func.arguments.flat_positional, is_kwarg_only=False) + if f"{f.func.name.name}" not in _get_kwargs_func_exclusion_list(): + _add_out_arg(out_args, f.func.arguments.flat_kwarg_only, is_kwarg_only=True) + + return f"{f.func.name.name}: {repr(out_args)}," + + +def main() -> None: + parser = argparse.ArgumentParser(description="Generate annotated_fn_args script") + parser.add_argument( + "native_functions", metavar="NATIVE", help="path to native_functions.yaml" + ) + parser.add_argument("tags", metavar="TAGS", help="path to tags.yaml") + parser.add_argument("out", metavar="OUT", help="path to output directory") + parser.add_argument( + "autograd", metavar="AUTOGRAD", help="path to template directory" + ) + args = parser.parse_args() + gen_annotated(args.native_functions, args.tags, args.out, args.autograd) + + +if __name__ == "__main__": + main() 
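Editor's note: for concreteness, the sketch below illustrates the per-operator record format that gen_annotated_args() above emits (one line per op, mapping a qualified name to a list of argument dicts with is_kwarg_only, name, simple_type, and optionally size). It is illustrative only and not part of the vendored file; the operator name and argument metadata are invented, and the real generator derives them from NativeFunction schemas via torchgen.api.python.

# A minimal, runnable sketch of the per-operator record that gen_annotated_args()
# produces. Everything below is invented for illustration: the real generator
# derives names and simple_type strings from NativeFunction schemas and may
# also attach a "size" field for sized list types.
from typing import Any, Dict, List


def annotate_args_sketch(qualified_name: str, args: List[Dict[str, Any]]) -> str:
    out_args: List[Dict[str, Any]] = []
    for arg in args:
        if arg.get("default") is not None:
            # Arguments with defaults are skipped, mirroring _add_out_arg above.
            continue
        out_args.append(
            {
                "is_kwarg_only": str(arg["is_kwarg_only"]),
                "name": arg["name"],
                "simple_type": arg["simple_type"],
            }
        )
    return f"{qualified_name}: {repr(out_args)},"


# Prints a line shaped like the entries written into annotated_fn_args.py.
print(
    annotate_args_sketch(
        "torch._C._VariableFunctions.add",
        [
            {"name": "self", "simple_type": "Tensor", "is_kwarg_only": False},
            {"name": "other", "simple_type": "Tensor", "is_kwarg_only": False},
            {"name": "alpha", "simple_type": "Scalar", "is_kwarg_only": True, "default": 1},
        ],
    )
)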
diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_inplace_or_view_type.py b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_inplace_or_view_type.py new file mode 100644 index 0000000000000000000000000000000000000000..ee1075cbed976873eb7095ca3615def67228754f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_inplace_or_view_type.py @@ -0,0 +1,613 @@ +# Generates ADInplaceOrViewType.h/cpp +# +# NOTE: If any changes are being made to the ADInplaceOrView codegen please also check +# if updates are needed in torch/csrc/autograd/autograd_not_implemented_fallback.cpp +# The fallback is expected to mimic this codegen, so we should keep the two in sync. + +from typing import Dict, List, Optional, Sequence, Tuple + +from torchgen.api import cpp +from torchgen.api.autograd import ( + dispatch_strategy, + gen_differentiable_outputs, + NativeFunctionWithDifferentiabilityInfo, +) +from torchgen.api.types import ( + BaseCType, + Binding, + boolT, + ConstRefCType, + CType, + DispatcherSignature, + intArrayRefT, + longT, + OptionalCType, + symIntArrayRefT, + SymIntT, + # See Note [Nested Arg Types] + tensorT, +) +from torchgen.code_template import CodeTemplate +from torchgen.context import with_native_function +from torchgen.model import ( + NativeFunction, + SchemaKind, + SelfArgument, + TensorOptionsArguments, + Type, +) +from torchgen.utils import FileManager + +from .context import with_native_function_with_differentiability_info +from .gen_trace_type import ( + get_return_value, + MANUAL_AUTOGRAD, + tie_return_values, + type_wrapper_name, +) + +# See NOTE [ Autograd View Variables ] in variable.h for details. +# If you update list VIEW_FUNCTIONS or RETURNS_VIEWS_OF_INPUT, +# you **MUST** also update the public list of view ops accordingly in +# docs/source/tensor_view.rst. Note that not all ATen functions are exposed to the public, +# e.g. alias & sparse_coo_tensor_with_dims_and_tensors. +# +# A map: function name => name of the argument that all outputs are view of + +VIEW_FUNCTIONS_WITH_METADATA_CHANGE = [ + "view_as_complex", + "view_as_real", + "_conj", + "_neg_view", + "_nested_view_from_buffer", +] + +VIEW_FUNCTIONS = { + "numpy_T": "self", + "alias": "self", + "as_strided": "self", + "diagonal": "self", + "expand": "self", + "permute": "self", + "select": "self", + "slice": "self", + "split": "self", + "split_with_sizes": "self", + "squeeze": "self", + "t": "self", + "transpose": "self", + "unfold": "self", + "unsqueeze": "self", + "flatten": "self", + "view": "self", + "unbind": "self", + "_indices": "self", + "_values": "self", + "indices": "self", + "values": "self", + "crow_indices": "self", + "col_indices": "self", + "ccol_indices": "self", + "row_indices": "self", + # sparse_coo ctor output should really be views of both indices and values, + # but we only support making a view of a single variable, and indices is + # discrete anyway. + # FIXME: clone indices on construction.
+ "sparse_coo_tensor_with_dims_and_tensors": "values", + "_reshape_alias": "self", + "_test_autograd_multiple_dispatch_view": "self", +} + +for key in VIEW_FUNCTIONS_WITH_METADATA_CHANGE: + VIEW_FUNCTIONS[key] = "self" + +# note: some VIEW_FUNCTIONS are just compositions of the view functions above +# this list contains both the root view functions and any that are purely composed +# of viewing functions, and is used by the JIT to determine when an operator +# may return a view of its inputs; however they may sometimes return a copy. +# (e.g. `contiguous`) +RETURNS_VIEWS_OF_INPUT = set(VIEW_FUNCTIONS.keys()).union( + { + "chunk", + "detach", + "contiguous", + "reshape", + "reshape_as", + "expand_as", + "view_as", + "real", + "imag", + "narrow", + "movedim", + "tensor_split", + "swapdims", + "swapaxes", + "mT", + "mH", + "adjoint", + "matrix_H", + } +) + +# These are the functions we consider views for the purposes of validating +# StorageImpl and TensorImpl in gen_variable_type. +# `_unsafe_view` is not included in VIEW_FUNCTIONS above because it is not a +# view for the purposes of ADInplaceOrView kernel, we do not want to call as_view +# See NOTE [Unsafe View] for more info. +ALL_VIEW_FUNCTIONS = { + **VIEW_FUNCTIONS, + "_unsafe_view": "self", +} + +ARRAYREF_TO_VEC = CodeTemplate( + """\ +auto ${vec} = ${arg}.vec(); +""" +) + +OPTIONAL_TO_VAL = CodeTemplate( + """\ +auto ${val} = ${arg}.value_or(${default}); +""" +) + +CALL_DISPATCH = CodeTemplate( + """\ +at::_ops::${unambiguous_name}::call(${unpacked_args})""" +) + +SETUP_REPLAY_VIEW_IF_NOT_SUPPORT_AS_STRIDED_OR_VIEW_WITH_METADATA_CHANGE = CodeTemplate( + """\ +std::function func=nullptr; +if (${is_view_with_metadata_change} || !self.unsafeGetTensorImpl()->support_as_strided() || + c10::AutogradState::get_tls_state().get_view_replay_enabled()) { + ${replay_view_func} +} +""" +) + +REPLAY_VIEW_LAMBDA_FUNC = CodeTemplate( + """\ +func = [=](const at::Tensor& ${input_base}) { + return ${replay_view_call}; +}; +""" +) + +METHOD_DEFINITION = CodeTemplate( + """\ +${return_type} ${type_wrapper_name}(${formals}) { + ${type_definition_body} +} +""" +) + +WRAPPER_REGISTRATION = CodeTemplate( + """\ +m.impl("${unqual_operator_name_with_overload}", + TORCH_FN(${class_type}::${type_wrapper_name}) +); +""" +) + +AUTOGRAD_NOT_IMPLEMENTED_REGISTRATION = CodeTemplate( + """\ +m.impl("${unqual_operator_name_with_overload}", torch::autograd::autogradNotImplementedFallback()); +""" +) + +INPLACE_REDISPATCH = CodeTemplate( + """\ +{ + at::AutoDispatchBelowADInplaceOrView guard; + at::_ops::${unambiguous_name}::redispatch(${unpacked_args}); +} +""" +) + +ASSIGN_RETURN_VALUE = CodeTemplate( + """\ +${return_values} = ${rhs_value}; +""" +) + +VIEW_REDISPATCH = CodeTemplate( + """\ +${assign_return_values} ([&]() { + at::AutoDispatchBelowADInplaceOrView guard; + return at::_ops::${unambiguous_name}::redispatch(${unpacked_args}); +})(); +""" +) + +TMP_VAR = "_tmp" + + +# FIXME: Ideally these functions should be methods on Type class, but we have a +# comment in codegen/model.py there saying these concepts are not well defined. +# Thus we put a version that commonly used by autograd codegen here. +def is_tensor_type(t: Type) -> bool: + # TODO: Should handle optional here? + return t.is_tensor_like() and t.is_list_like() is None + + +def is_tensor_list_type(t: Type) -> bool: + # TODO: Should handle optional here? 
+ return t.is_tensor_like() and t.is_list_like() is not None + + +UNPACK_TENSOR = CodeTemplate( + """\ +auto${ref} ${arg_name}_ = unpack${suffix}(${arg_name}, "${arg_name}", ${arg_pos});""" +) + + +def unpacked_name(arg_name: str) -> str: + return arg_name + "_" + + +@with_native_function +def unpack_args(f: NativeFunction) -> Tuple[List[str], List[Binding]]: + body: List[str] = [] + unpacked_bindings: List[Binding] = [] + + bindings = [ + r + for a in f.func.schema_order_arguments() + for r in cpp.argument( + a, + method=False, + symint=True, + cpp_no_default_args=set(), + faithful=False, + has_tensor_options=False, + ) + ] + + for i, binding in enumerate(bindings): + assert not isinstance(binding.argument, SelfArgument) + if isinstance(binding.argument, TensorOptionsArguments): + raise RuntimeError("VariableKernel shouldn't take TensorOptions") + + is_nullable = binding.argument.type.is_nullable() + if not binding.argument.type.is_tensor_like() or is_nullable: + unpacked_bindings.append(binding) + continue + + is_tensor_list = is_tensor_list_type(binding.argument.type) + ref = (not is_nullable) and not is_tensor_list + suffix = "_opt" if is_nullable and not is_tensor_list else "" + body.append( + UNPACK_TENSOR.substitute( + arg_name=binding.name, + arg_pos=i, + suffix=suffix, + ref="&" if ref else "", + ) + ) + unpacked_bindings.append( + Binding( + name=unpacked_name(binding.name), + nctype=binding.nctype, + argument=binding.argument, + default=binding.default, + ) + ) + + return body, unpacked_bindings + + +def get_base_name(f: NativeFunction) -> str: + return f.func.name.name.base # TODO: should be str(f.func.name.name)? + + +def get_view_info(f: NativeFunction) -> Optional[str]: + base_name = get_base_name(f) + view_info = VIEW_FUNCTIONS.get(base_name, None) + if view_info is None and base_name in RETURNS_VIEWS_OF_INPUT: + view_info = "self" + return view_info + + +# For view replay calls, we generate an ordinary Dispatcher::call() instead, because: +# - We want to replay the entire call into the op, including any previously-set dispatch keys (including autograd!). +# - The view replay call also is not part of the hot path. +def emit_view_call( + f: NativeFunction, input_base: str, unpacked_args: Sequence[str] +) -> str: + # View replay functions use the standard Dispatcher::call API. + return CALL_DISPATCH.substitute( + unambiguous_name=f.func.name.unambiguous_name(), unpacked_args=unpacked_args + ) + + +def emit_view_lambda(f: NativeFunction, unpacked_bindings: List[Binding]) -> str: + """Generate an additional lambda function to recover views in backward when as_strided is not supported. + See Note [View + Inplace update for base tensor] and [View + Inplace update for view tensor] for more details. 
+ """ + input_base = "input_base" + replay_view_func = "" + updated_unpacked_args: List[str] = [] + known_view_arg_simple_types: List[CType] = [ + BaseCType(longT), + OptionalCType(BaseCType(longT)), + BaseCType(SymIntT), + OptionalCType(BaseCType(SymIntT)), + BaseCType(boolT), + BaseCType(intArrayRefT), + BaseCType(symIntArrayRefT), + ConstRefCType(BaseCType(tensorT)), + ] + for unpacked_binding in unpacked_bindings: + arg, arg_type = unpacked_binding.name, unpacked_binding.nctype.type + if arg == "self_": + updated_unpacked_args.append(input_base) + continue + if arg_type not in known_view_arg_simple_types: + known_types_str = ", ".join([str(t) for t in known_view_arg_simple_types]) + raise TypeError( + f"You are adding an {arg_type} {arg} argument to op {cpp.name(f.func)} in addition to known types: " + f"{known_types_str}. Please update the list or materialize it so that it can be closed " + "over by value, also add a test in pytorch/xla/test/test_operations.py where this code " + "is exercised." + ) + if arg_type == BaseCType(intArrayRefT) or arg_type == BaseCType( + symIntArrayRefT + ): + # It's not safe to close over IntArrayRef by value, since this is a + # reference type, so materialize a vector to close over by value + arg_vec = arg + "_vec" + replay_view_func += ARRAYREF_TO_VEC.substitute(arg=arg, vec=arg_vec) + updated_unpacked_args.append(arg_vec) + elif arg_type == OptionalCType(BaseCType(longT)): + # Materialize int64_t? to int64_t + arg_value = arg + "_val" + replay_view_func += OPTIONAL_TO_VAL.substitute( + arg=arg, val=arg_value, default="0" + ) + updated_unpacked_args.append(arg_value) + elif ( + arg == "nested_size_" or arg == "nested_strides_" or arg == "offsets_" + ) and arg_type == ConstRefCType(BaseCType(tensorT)): + # [NOTE] [Nested Arg Types] + # This is temporary. Nested tensors will be migrating to use SymInts and + # nested_size and nested_strides will no longer be tensors. + updated_unpacked_args.append(arg[:-1]) + else: + updated_unpacked_args.append(arg) + + replay_view_call = emit_view_call(f, input_base, updated_unpacked_args) + replay_view_func += REPLAY_VIEW_LAMBDA_FUNC.substitute( + input_base=input_base, replay_view_call=replay_view_call + ) + + is_view_with_metadata_change = ( + "true" if cpp.name(f.func) in VIEW_FUNCTIONS_WITH_METADATA_CHANGE else "false" + ) + + return SETUP_REPLAY_VIEW_IF_NOT_SUPPORT_AS_STRIDED_OR_VIEW_WITH_METADATA_CHANGE.substitute( + is_view_with_metadata_change=is_view_with_metadata_change, + replay_view_func=replay_view_func, + ) + + +def emit_view_body( + fn: NativeFunctionWithDifferentiabilityInfo, var: str +) -> Tuple[str, str]: + # See NOTE [ Autograd View Variables ] in variable.h for details. 
+ f = fn.func + base_name = get_base_name(f) + view_info = get_view_info(f) + call = "" + differentiable_outputs = gen_differentiable_outputs(fn) + differentiable_output_vars = {r.name for r in differentiable_outputs} + if not isinstance(view_info, str): + raise TypeError( + f"The view info should be a string for {base_name}, but it is: {view_info}" + ) + if len(differentiable_output_vars) == 0: + # no output is differentiable (.indices() for SparseTensors for example) + rhs_value = ( + f"as_view({view_info}, {var}, " + f"/* is_bw_differentiable */ false, /* is_fw_differentiable */ false)" + ) + elif len(differentiable_output_vars) == 1: + # Single differentiable output (Tensor or Tensor[]) + return_info = differentiable_outputs[0] + # We only support simple Tensor or a TensorList for functions that return views + if not is_tensor_type(return_info.type) and not is_tensor_list_type( + return_info.type + ): + raise RuntimeError( + f"{base_name} that return differentiable views can only return Tensor or Tensor[]" + ) + + # See Note [ View + Inplace detection] + def get_creation_meta_in_mode(original: str) -> str: + creation_meta_with_grad_mode = f"(at::GradMode::is_enabled() ? {original} : CreationMeta::NO_GRAD_MODE)" + return f"InferenceMode::is_enabled() ? CreationMeta::INFERENCE_MODE : {creation_meta_with_grad_mode}" + + # Only allow rebasing of the history if we return a single Tensor + # If we are in a no grad block, raise a warning + # See NOTE [ View + Inplace detection ] for more details about this logic + if is_tensor_list_type(return_info.type): + creation_meta = get_creation_meta_in_mode("CreationMeta::MULTI_OUTPUT_NODE") + call += ( + f"as_view(/* base */ {view_info}, /* output */ {var}, /* is_bw_differentiable */ true, " + "/* is_fw_differentiable */ true, " + f"/* creation_meta */ {creation_meta});" + ) + rhs_value = f"std::move({var})" + else: + _, unpacked_bindings = unpack_args(f) + call += emit_view_lambda(f, unpacked_bindings) + creation_meta = get_creation_meta_in_mode("CreationMeta::DEFAULT") + rhs_value = ( + f"as_view(/* base */ {view_info}, /* output */ {var}, /* is_bw_differentiable */ true, " + "/* is_fw_differentiable */ true, " + f"/* view_func */ func, /* creation_meta */ {creation_meta})" + ) + else: + # This could be supported but we don't need it at the moment, so keeping things simple. + raise RuntimeError( + "Function that return multiple differentiable output " + "when at least one of them is view is not supported." + ) + return call, rhs_value + + +def modifies_arguments(f: NativeFunction) -> bool: + return f.func.kind() in [SchemaKind.inplace, SchemaKind.out] + + +@with_native_function_with_differentiability_info +def emit_inplace_or_view_body(fn: NativeFunctionWithDifferentiabilityInfo) -> List[str]: + f = fn.func + inplace_view_body: List[str] = [] + + dispatcher_sig = DispatcherSignature.from_schema(f.func) + dispatcher_exprs = dispatcher_sig.exprs() + + # code-generated ADInplaceOrView kernels plumb and recompute dispatch keys directly through the kernel for performance. + # See Note [Plumbing Keys Through The Dispatcher] for details. + dispatch_key_set = "ks & c10::after_ADInplaceOrView_keyset" + redispatch_args = ", ".join([dispatch_key_set] + [a.expr for a in dispatcher_exprs]) + + # Note that this calls the slow, dispatching variants of manual_cpp_binding ops. + # We could probably work harder to ensure that the fast variants are called instead, but the perf benefit would be minimal. 
+ if modifies_arguments(f): # inplace op + inplace_view_body.append( + INPLACE_REDISPATCH.substitute( + unambiguous_name=f.func.name.unambiguous_name(), + unpacked_args=redispatch_args, + ) + ) + for r in cpp.return_names(f): + inplace_view_body.append(f"increment_version({r});") + else: + assert get_view_info(f) is not None + inplace_view_body.append( + VIEW_REDISPATCH.substitute( + assign_return_values="auto " + TMP_VAR + " = ", + unambiguous_name=f.func.name.unambiguous_name(), + unpacked_args=redispatch_args, + ) + ) + call, rhs_value = emit_view_body(fn, TMP_VAR) + inplace_view_body.append(call) + assert rhs_value is not None + inplace_view_body.append( + ASSIGN_RETURN_VALUE.substitute( + return_values=tie_return_values(f), rhs_value=rhs_value + ) + ) + if f.func.returns: + inplace_view_body.append(f"return {get_return_value(f)};") + return inplace_view_body + + +@with_native_function +def gen_formals(f: NativeFunction) -> str: + return ", ".join( + # code-generated autograd kernels plumb and recompute dispatch keys directly through the kernel for performance. + # See Note [Plumbing Keys Through The Dispatcher] for details. + ["c10::DispatchKeySet ks"] + + [ + f'{cpp.argument_type(a, binds="__placeholder__", symint=True).cpp_type()} {a.name}' + for a in f.func.schema_order_arguments() + ] + ) + + +@with_native_function_with_differentiability_info +def inplace_or_view_method_definition( + fn: NativeFunctionWithDifferentiabilityInfo, +) -> Optional[str]: + f = fn.func + if get_view_info(f) is None and ( + # For functions that modify their inputs but don't return them, + # we can't give them autograd support. + # See https://github.com/pytorch/pytorch/issues/53796 + not modifies_arguments(f) + or len(f.func.returns) == 0 + ): + return None + return METHOD_DEFINITION.substitute( + return_type=cpp.returns_type(f.func.returns, symint=True).cpp_type(), + type_wrapper_name=type_wrapper_name(f), + formals=gen_formals(f), + type_definition_body=emit_inplace_or_view_body(fn), + ) + + +@with_native_function_with_differentiability_info +def inplace_or_view_method_registration( + fn: NativeFunctionWithDifferentiabilityInfo, +) -> Optional[str]: + f = fn.func + if get_view_info(f) is None and ( + not modifies_arguments(f) or len(f.func.returns) == 0 + ): + return None + return WRAPPER_REGISTRATION.substitute( + unqual_operator_name_with_overload=f.func.name, + type_wrapper_name=type_wrapper_name(f), + class_type="ADInplaceOrView", + ) + + +def use_derived(fn: NativeFunctionWithDifferentiabilityInfo) -> bool: + f = fn.func + name = cpp.name(f.func) + return name not in MANUAL_AUTOGRAD and dispatch_strategy(fn) == "use_derived" + + +def gen_inplace_or_view_type_env( + fn: NativeFunctionWithDifferentiabilityInfo, +) -> Dict[str, List[str]]: + definition = inplace_or_view_method_definition(fn) + registration = inplace_or_view_method_registration(fn) + + return { + "ops_headers": ( + [f"#include "] + if definition is not None + else [] + ), + "inplace_or_view_method_definitions": [definition] + if definition is not None + else [], + "inplace_or_view_wrapper_registrations": [registration] + if registration is not None + else [], + } + + +def gen_inplace_or_view_type( + out: str, + native_yaml_path: str, + tags_yaml_path: str, + fns_with_infos: List[NativeFunctionWithDifferentiabilityInfo], + template_path: str, +) -> None: + # NOTE: see Note [Sharded File] at the top of the VariableType.cpp + # template regarding sharding of the generated files. 
+ num_shards = 2 + + fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False) + fm.write_sharded( + "ADInplaceOrViewType.cpp", + [fn for fn in fns_with_infos if use_derived(fn)], + key_fn=lambda fn: fn.func.root_name, + base_env={ + "generated_comment": "@" + + f"generated from {fm.template_dir_for_comments()}/ADInplaceOrViewType.cpp", + }, + env_callable=gen_inplace_or_view_type_env, + num_shards=2, + sharded_keys={ + "ops_headers", + "inplace_or_view_method_definitions", + "inplace_or_view_wrapper_registrations", + }, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_python_functions.py b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_python_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..2c7ce19ff69bd2bff7edc38ee067420ca4f5719a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_python_functions.py @@ -0,0 +1,1377 @@ +# Generates Python bindings for ATen functions +# +# The bindings are generated as methods on python_variable or functions on the +# torch._C._nn. torch._C._fft, torch._C._linalg, torch._C._nested, torch._C._sparse +# or torch._C._special objects. +# + +# Code tries to stick to the following rules: +# +# - templates should be colocated with the functions that use them. +# no templates are currently shared between functions, but if that +# happens, maybe put the template with the first one +# +# - don't use environment dictionaries when calling template.substitute(). +# pass named arguments directly for everything, otherwise it's much too +# hard to track what's actually being used and by who +# +# - colocate any new hacks/adjustments with existing ones of the same kind. +# ideally in a data structure rather than code if possible. See e.g. +# SCHEMA_DEFAULT_CONVERSION_HACKS, etc. +# +# - similarly, conversions from one format to another should ideally happen +# all at once in a single place. +# +# - no nontrivial nested functions. couple-liners are ok but please no more. +# especially avoid functions that read/write outer variables defined far away. +# +# - raise RuntimeError instead of asserting, and put as much +# information as is available into the message. I.e. no need to +# plumb in new params whose only purpose is to fill out an error +# message, but use what's there +# + +import itertools +import re +from collections import defaultdict + +from typing import Callable, Dict, Iterable, List, Optional, Sequence, Set, Tuple + +import yaml +from torchgen.api import cpp +from torchgen.api.python import ( + arg_parser_output_exprs, + cpp_dispatch_exprs, + cpp_dispatch_target, + dispatch_lambda_args, + dispatch_lambda_exprs, + dispatch_lambda_return_str, + has_tensor_options, + namedtuple_fieldnames, + PythonSignature, + PythonSignatureDeprecated, + PythonSignatureGroup, + PythonSignatureNativeFunctionPair, + signature, + signature_from_schema, +) + +from torchgen.code_template import CodeTemplate +from torchgen.context import with_native_function +from torchgen.gen import cpp_string, parse_native_yaml, parse_tags_yaml +from torchgen.model import ( + Argument, + BaseOperatorName, + FunctionSchema, + NativeFunction, + Type, + Variant, +) +from torchgen.utils import FileManager, split_name_params +from torchgen.yaml_utils import YamlLoader + +from .gen_trace_type import should_trace + +# +# declarations blocklist +# We skip codegen for these functions, for various reasons. 
+# Future PRs will categorize this list and eliminate or hoist +# them out of eager-only codegen. +# See https://github.com/pytorch/pytorch/issues/30788 +# + +# These functions require manual Python bindings or are not exposed to Python +_SKIP_PYTHON_BINDINGS = [ + "alias", + "contiguous", + "is_cuda", + "is_sparse", + "is_sparse_csr", + "size", + "stride", + "sym_size", + "sym_stride", + "sym_storage_offset", + "sym_numel", + ".*_backward", + ".*_backward_(out|input|weight|bias)", + ".*_forward", + ".*_forward_out", + ".*_jvp", + "_unsafe_view", + "tensor", + "_?sparse_(coo|compressed|csr|csc|bsr|bsc)_tensor.*", + "_range.*", + "_sparse_add_out", + "_sparse_div.*", + "_sparse_mul.*", + "_sparse_sub.*", + "_sparse_dense_add_out", + "index", + "index_out", + "unique_dim_consecutive", + "_cumsum.*", + "_cumprod.*", + "_sum.*", + "_prod.*", + "_th_.*", + "_thnn_.*", + "range.*", + "_solve.*", + "_inverse.*", + "_cholesky.*", + "_triangular_solve.*", + "_qr.*", + "_svd.*", + "slice", + "item", + "_local_scalar_dense", + "to", + "_to_copy", + "_to_copy_out", + "_reshape_copy", + "_reshape_copy_out", + "copy_sparse_to_sparse_", + "copy_", + "numpy_T", + "matrix_H", + "mT", + "mH", # these need to be an attributes in Python, not functions + "nonzero(_(out|numpy))?", + "set_data", + ".*_overrideable", # overrideable functions for backend extension + "data", + "is_leaf", + "output_nr", + "_version", + "requires_grad_", + "retains_grad", + "set_", + "_fw_primal", + "fake_quantize_per_tensor_affine_cachemask", + "fake_quantize_per_channel_affine_cachemask", + "_new_zeros_with_same_feature_meta", + "_has_same_storage_numel", # used for forward AD internals + "_reshape_alias", + "replace_", # only used by the functionalization pass, doesn't need to be exposed to python + "copy", # only used by the functionalization pass + "fill.Tensor", # only used by the functionalization pass + "fill.Scalar", # only used by the functionalization pass + "lift.*", + "normal_functional", # only used by the functionalization pas + "nbytes", + "itemsize", +] + +SKIP_PYTHON_BINDINGS = [ + re.compile(rf"^{pattern}$") for pattern in _SKIP_PYTHON_BINDINGS +] + +# These function signatures are not exposed to Python. Note that this signature +# list does not support regex. +SKIP_PYTHON_BINDINGS_SIGNATURES = [ + "add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", + "add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", + "sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", + "sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", + "mul.Scalar(Tensor self, Scalar other) -> Tensor", + "mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", + "div.Scalar(Tensor self, Scalar other) -> Tensor", + "div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", +] + + +@with_native_function +def should_generate_py_binding(f: NativeFunction) -> bool: + # NativeFunctions that are entirely code-generated should not get python bindings + # because these codegen implementations are often inefficient. A handful of + # view_copy style ops were exposed accidentally when they were handwritten and now + # that we are moving them to codegen for bc reasons we need to keep them exposed in + # python. 
+ if "generated" in f.tags and "view_copy" not in f.tags: + return False + + name = cpp.name(f.func) + for skip_regex in SKIP_PYTHON_BINDINGS: + if skip_regex.match(name): + return False + + signature = str(f.func) + for pattern in SKIP_PYTHON_BINDINGS_SIGNATURES: + if pattern == signature: + return False + return True + + +def get_pycname(name: BaseOperatorName) -> str: + return f"THPVariable_{name}" + + +def is_noarg(overloads: Sequence[PythonSignatureNativeFunctionPair]) -> bool: + return len(overloads) == 1 and overloads[0].signature.arguments_count() == 0 + + +def is_py_variable_method(f: NativeFunction) -> bool: + return f.python_module is None and Variant.method in f.variants + + +def is_py_torch_function(f: NativeFunction) -> bool: + return f.python_module is None and Variant.function in f.variants + + +def is_py_nn_function(f: NativeFunction) -> bool: + return f.python_module == "nn" + + +def is_py_fft_function(f: NativeFunction) -> bool: + return f.python_module == "fft" + + +def is_py_linalg_function(f: NativeFunction) -> bool: + return f.python_module == "linalg" + + +def is_py_nested_function(f: NativeFunction) -> bool: + return f.python_module == "nested" + + +def is_py_sparse_function(f: NativeFunction) -> bool: + return f.python_module == "sparse" + + +def is_py_special_function(f: NativeFunction) -> bool: + return f.python_module == "special" + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# +# Main Function +# +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def gen( + out: str, + native_yaml_path: str, + tags_yaml_path: str, + deprecated_yaml_path: str, + template_path: str, + *, + symint: bool = True, +) -> None: + fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False) + native_functions = parse_native_yaml( + native_yaml_path, tags_yaml_path + ).native_functions + native_functions = list(filter(should_generate_py_binding, native_functions)) + + methods = load_signatures(native_functions, deprecated_yaml_path, method=True) + create_python_bindings( + fm, + methods, + is_py_variable_method, + None, + "python_variable_methods.cpp", + method=True, + symint=symint, + ) + + # NOTE: num_shards here must be synced with gatherTorchFunctions in + # torch/csrc/autograd/python_torch_functions_manual.cpp + functions = load_signatures(native_functions, deprecated_yaml_path, method=False) + create_python_bindings_sharded( + fm, + functions, + is_py_torch_function, + "torch", + "python_torch_functions.cpp", + method=False, + num_shards=3, + symint=symint, + ) + + create_python_bindings( + fm, + functions, + is_py_nn_function, + "torch.nn", + "python_nn_functions.cpp", + method=False, + symint=symint, + ) + + create_python_bindings( + fm, + functions, + is_py_fft_function, + "torch.fft", + "python_fft_functions.cpp", + method=False, + symint=symint, + ) + + create_python_bindings( + fm, + functions, + is_py_linalg_function, + "torch.linalg", + "python_linalg_functions.cpp", + method=False, + symint=symint, + ) + + create_python_bindings( + fm, + functions, + is_py_nested_function, + "torch.nested", + "python_nested_functions.cpp", + method=False, + ) + + create_python_bindings( + fm, + functions, + is_py_sparse_function, + "torch.sparse", + "python_sparse_functions.cpp", + method=False, + symint=symint, + ) + + create_python_bindings( + fm, + functions, + is_py_special_function, + "torch.special", + "python_special_functions.cpp", + method=False, + symint=symint, + ) + + # Currently, we only use 
`functions` to generate `return_types` bindings. + # All methods which return namedtuple have function variant at this point. + # If any method only operator with namedtuple is added in the future, + # we will have to address that. + create_python_return_type_bindings( + fm, functions, lambda fn: True, "python_return_types.cpp" + ) + create_python_return_type_bindings_header( + fm, functions, lambda fn: True, "python_return_types.h" + ) + + valid_tags = parse_tags_yaml(tags_yaml_path) + + def gen_tags_enum() -> Dict[str, str]: + return { + "enum_of_valid_tags": ( + "".join( + [f'\n.value("{tag}", at::Tag::{tag})' for tag in sorted(valid_tags)] + ) + ) + } + + fm.write("python_enum_tag.cpp", gen_tags_enum) + + +def group_filter_overloads( + pairs: Sequence[PythonSignatureNativeFunctionPair], + pred: Callable[[NativeFunction], bool], +) -> Dict[BaseOperatorName, List[PythonSignatureNativeFunctionPair]]: + grouped: Dict[ + BaseOperatorName, List[PythonSignatureNativeFunctionPair] + ] = defaultdict(list) + for pair in pairs: + if pred(pair.function): + grouped[pair.function.func.name.name].append(pair) + return grouped + + +def create_python_bindings( + fm: FileManager, + pairs: Sequence[PythonSignatureNativeFunctionPair], + pred: Callable[[NativeFunction], bool], + module: Optional[str], + filename: str, + *, + method: bool, + symint: bool = True, +) -> None: + """Generates Python bindings to ATen functions""" + py_methods: List[str] = [] + ops_headers: List[str] = [] + py_method_defs: List[str] = [] + py_forwards: List[str] = [] + + grouped = group_filter_overloads(pairs, pred) + + for name in sorted(grouped.keys(), key=str): + overloads = grouped[name] + py_methods.append( + method_impl(name, module, overloads, method=method, symint=symint) + ) + py_method_defs.append(method_def(name, module, overloads, method=method)) + py_forwards.extend(forward_decls(name, overloads, method=method)) + ops_headers.append(f"#include ") + + fm.write_with_template( + filename, + filename, + lambda: { + "generated_comment": "@" + + f"generated from {fm.template_dir_for_comments()}/{filename}", + "ops_headers": ops_headers, + "py_forwards": py_forwards, + "py_methods": py_methods, + "py_method_defs": py_method_defs, + }, + ) + + +def create_python_return_type_bindings( + fm: FileManager, + pairs: Sequence[PythonSignatureNativeFunctionPair], + pred: Callable[[NativeFunction], bool], + filename: str, +) -> None: + """ + Generate function to initialize and return named tuple for native functions + which returns named tuple and registration invocations in `python_return_types.cpp`. 
+ """ + py_return_types_definition: List[str] = [] + py_return_types_registrations: List[str] = [] + + grouped = group_filter_overloads(pairs, pred) + + for name in sorted(grouped.keys(), key=str): + overloads = grouped[name] + definitions, registrations = generate_return_type_definition_and_registrations( + overloads + ) + py_return_types_definition.append( + "" if not definitions else "\n".join(definitions) + ) + py_return_types_registrations.append( + "" if not registrations else "\n".join(registrations) + ) + + fm.write_with_template( + filename, + filename, + lambda: { + "generated_comment": "@" + + f"generated from {fm.template_dir_for_comments()}/{filename}", + "py_return_types": py_return_types_definition, + "py_return_types_registrations": py_return_types_registrations, + }, + ) + + +def create_python_return_type_bindings_header( + fm: FileManager, + pairs: Sequence[PythonSignatureNativeFunctionPair], + pred: Callable[[NativeFunction], bool], + filename: str, +) -> None: + """ + Generate function to initialize and return named tuple for native functions + which returns named tuple and relevant entry for the map in `python_return_types.cpp`. + """ + py_return_types_declarations: List[str] = [] + + grouped = group_filter_overloads(pairs, pred) + + for name in sorted(grouped.keys(), key=str): + overloads = grouped[name] + declarations = generate_return_type_declarations(overloads) + py_return_types_declarations.append( + "" if not declarations else "\n".join(declarations) + ) + + fm.write_with_template( + filename, + filename, + lambda: { + "generated_comment": "@" + + f"generated from {fm.template_dir_for_comments()}/{filename}", + "py_return_types_declarations": py_return_types_declarations, + }, + ) + + +def create_python_bindings_sharded( + fm: FileManager, + pairs: Sequence[PythonSignatureNativeFunctionPair], + pred: Callable[[NativeFunction], bool], + module: Optional[str], + filename: str, + *, + method: bool, + num_shards: int, + symint: bool = True, +) -> None: + """Generates Python bindings to ATen functions""" + grouped = group_filter_overloads(pairs, pred) + + def key_func( + kv: Tuple[BaseOperatorName, List[PythonSignatureNativeFunctionPair]] + ) -> str: + return kv[0].base + + def env_func( + kv: Tuple[BaseOperatorName, List[PythonSignatureNativeFunctionPair]] + ) -> Dict[str, List[str]]: + name, fn_pairs = kv + return { + "ops_headers": [f"#include "], + "py_forwards": list(forward_decls(name, fn_pairs, method=method)), + "py_methods": [ + method_impl(name, module, fn_pairs, method=method, symint=symint) + ], + "py_method_defs": [method_def(name, module, fn_pairs, method=method)], + } + + fm.write_sharded( + filename, + grouped.items(), + base_env={ + "generated_comment": "@" + + f"generated from {fm.template_dir_for_comments()}/{filename}", + }, + key_fn=key_func, + env_callable=env_func, + num_shards=num_shards, + sharded_keys={"ops_headers", "py_forwards", "py_methods", "py_method_defs"}, + ) + + +def load_signatures( + native_functions: List[NativeFunction], + deprecated_yaml_path: str, + *, + method: bool, + skip_deprecated: bool = False, + pyi: bool = False, +) -> Sequence[PythonSignatureNativeFunctionPair]: + @with_native_function + def gen_signature_pairs(f: NativeFunction) -> PythonSignatureNativeFunctionPair: + return PythonSignatureNativeFunctionPair( + signature=signature(f, method=method, pyi=pyi), + function=f, + ) + + pairs = list(map(gen_signature_pairs, native_functions)) + deprecated = load_deprecated_signatures( + pairs, deprecated_yaml_path, 
method=method, pyi=pyi + ) + return pairs if skip_deprecated else pairs + deprecated + + +def load_deprecated_signatures( + pairs: Sequence[PythonSignatureNativeFunctionPair], + deprecated_yaml_path: str, + *, + method: bool, + pyi: bool, +) -> List[PythonSignatureNativeFunctionPair]: + # The deprecated.yaml doesn't have complete type information, we need + # find and leverage the original ATen signature (to which it delegates + # the call) to generate the full python signature. + # We join the deprecated and the original signatures using type-only form. + + # group the original ATen signatures by name + grouped: Dict[str, List[PythonSignatureNativeFunctionPair]] = defaultdict(list) + for pair in pairs: + grouped[pair.signature.name].append(pair) + + # find matching original signatures for each deprecated signature + results: List[PythonSignatureNativeFunctionPair] = [] + + with open(deprecated_yaml_path) as f: + deprecated_defs = yaml.load(f, Loader=YamlLoader) + + for deprecated in deprecated_defs: + schema = FunctionSchema.parse(deprecated["name"]) + aten_name, call_args = split_name_params(deprecated["aten"]) + is_out = aten_name.endswith("_out") + if is_out: + aten_name = aten_name.replace("_out", "") + + # HACK: these are fixed constants used to pass the aten function. + # The type must be known ahead of time + known_constants = { + "1": Type.parse("Scalar"), + } + schema_args_by_name = {a.name: a for a in schema.arguments.flat_all} + for name in call_args: + assert ( + name in schema_args_by_name or name in known_constants + ), f"deprecation definiton: Unrecognized value {name}" + + # Map deprecated signature arguments to their aten signature and test + # if the types and alias annotation match. + def is_schema_compatible( + aten_schema: FunctionSchema, + ) -> bool: + arguments: Iterable[Argument] + if is_out: + arguments = itertools.chain( + aten_schema.arguments.out, aten_schema.arguments.flat_non_out + ) + else: + arguments = aten_schema.arguments.flat_all + + for i, arg in enumerate(arguments): + if i < len(call_args): + arg_name = call_args[i] + if arg_name in known_constants: + schema_type = known_constants[arg_name] + schema_annotation = None + else: + schema_arg = schema_args_by_name[arg_name] + schema_type = schema_arg.type + schema_annotation = schema_arg.annotation + + if schema_type != arg.type or schema_annotation != arg.annotation: + return False + else: + if arg.default is None: + return False + + return len(schema.returns) == len(aten_schema.returns) and all( + a == b for a, b in zip(schema.returns, aten_schema.returns) + ) + + any_schema_found = False + for pair in grouped[aten_name]: + if not is_schema_compatible(pair.function.func): + continue + any_schema_found = True + + python_sig = signature_from_schema( + schema, + category_override=pair.function.category_override, + method=method, + pyi=pyi, + ) + + results.append( + PythonSignatureNativeFunctionPair( + signature=PythonSignatureDeprecated( + name=python_sig.name, + input_args=python_sig.input_args, + input_kwargs=python_sig.input_kwargs, + output_args=python_sig.output_args, + tensor_options_args=python_sig.tensor_options_args, + method=python_sig.method, + deprecated_schema=schema, + deprecated_args_exprs=tuple(call_args), + returns=python_sig.returns, + ), + function=pair.function, + ) + ) + assert ( + any_schema_found + ), f"No native function with name {aten_name} matched signature:\n {str(schema)}" + + return results + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# +# 
Named Tuple Codegen +# +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +@with_native_function +def gen_namedtuple_typename_key(f: NativeFunction) -> str: + name = cpp.name(f.func) + fieldnames = namedtuple_fieldnames(f.func.returns) + return "_".join([name] + fieldnames) + + +def emit_namedtuple_call( + overloads: Sequence[PythonSignatureNativeFunctionPair], +) -> Tuple[List[str], Dict[str, str]]: + """ + Generate block of named tuple type def inits, and add typeref snippets + to declarations that use them + """ + typenames: Dict[ + str, str + ] = {} # map from unique name + field name lists to typedef name + typedefs: List[str] = [] # typedef declarations and init code + + for overload in overloads: + fieldnames = namedtuple_fieldnames(overload.function.func.returns) + if not fieldnames: + continue + + name = cpp.name(overload.function.func) # use @with_native_function? + tn_key = gen_namedtuple_typename_key(overload.function) + typename = typenames.get(tn_key) + if typename is None: + typename = f'NamedTuple{"" if not typedefs else len(typedefs)}' + typenames[tn_key] = typename + typedefs.append( + f"""\ +static PyTypeObject* {typename} = generated::get_{name}_namedtuple();""" + ) + + return typedefs, typenames + + +def generate_return_type_definition_and_registrations( + overloads: Sequence[PythonSignatureNativeFunctionPair], +) -> Tuple[List[str], List[str]]: + """ + Generate block of function in `python_return_types.cpp` to initialize + and return named tuple for a native function which returns named tuple + and registration invocations in same file. + """ + typenames: Dict[ + str, str + ] = {} # map from unique name + field name lists to typedef name + definitions: List[str] = [] # function definition to register the typedef + registrations: List[str] = [] # register call for the typedef + + for overload in overloads: + fieldnames = namedtuple_fieldnames(overload.function.func.returns) + if not fieldnames: + continue + + fields = ", ".join(f'{{"{fn}", ""}}' for fn in fieldnames) + + name = cpp.name(overload.function.func) # use @with_native_function? + tn_key = gen_namedtuple_typename_key(overload.function) + typename = typenames.get(tn_key) + + if typename is None: + typename = f'{name}NamedTuple{"" if not definitions else len(definitions)}' + typenames[tn_key] = typename + definitions.append( + f"""\ +PyTypeObject* get_{name}_namedtuple() {{ + static PyStructSequence_Field NamedTuple_fields[] = {{ {fields}, {{nullptr}} }}; + static PyTypeObject {typename}; + static bool is_initialized = false; + static PyStructSequence_Desc desc = {{ "torch.return_types.{name}", nullptr, NamedTuple_fields, {len(fieldnames)} }}; + if (!is_initialized) {{ + PyStructSequence_InitType(&{typename}, &desc); + {typename}.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; + is_initialized = true; + }} + return &{typename}; +}} +""" + ) + registrations.append( + f'addReturnType(return_types_module, "{name}", generated::get_{name}_namedtuple());' + ) + + return definitions, registrations + + +def generate_return_type_declarations( + overloads: Sequence[PythonSignatureNativeFunctionPair], +) -> List[str]: + """ + Generate block of function declarations in `python_return_types.h` to initialize + and return named tuple for a native function. 
+ """ + typenames: Dict[ + str, str + ] = {} # map from unique name + field name lists to typedef name + declarations: List[str] = [] # function declaration to register the typedef + + for overload in overloads: + fieldnames = namedtuple_fieldnames(overload.function.func.returns) + if not fieldnames: + continue + + name = cpp.name(overload.function.func) # use @with_native_function? + tn_key = gen_namedtuple_typename_key(overload.function) + typename = typenames.get(tn_key) + + if typename is None: + typename = ( + f'{name}NamedTuple{"" if not declarations else len(declarations)}' + ) + typenames[tn_key] = typename + declarations.append(f"PyTypeObject* get_{name}_namedtuple();") + + return declarations + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# +# Method Impl Codegen +# +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + +# python binding for all overloads of a particular function/method +PY_VARIABLE_METHOD_VARARGS = CodeTemplate( + r"""\ +// ${name} +static PyObject * ${pycname}(PyObject* self_, PyObject* args, PyObject* kwargs) +{ + ${method_header} + static PythonArgParser parser({ + ${signatures} + }, /*traceable=*/${traceable}); + + ParsedArgs<${max_args}> parsed_args; + auto _r = parser.parse(${self_}, args, kwargs, parsed_args); + ${check_has_torch_function} + switch (_r.idx) { + ${dispatch} + } + ${method_footer} +} + +""" +) + +# handler for a single parsed signature - may be a single overload or +# a pair of overloads that whose signatures only differ in output params +# (plugged into PY_VARIABLE_METHOD_VARARGS as an item in ${dispatch}) +PY_VARIABLE_CASE = CodeTemplate( + """\ +case ${overload_index}: { + ${body} +} +""" +) + +# python binding for single-overload function/method +PY_VARIABLE_METHOD_VARARGS_SINGLETON = CodeTemplate( + """\ +// ${name} +static PyObject * ${pycname}(PyObject* self_, PyObject* args, PyObject* kwargs) +{ + ${method_header} + static PythonArgParser parser({ + ${signatures} + }, /*traceable=*/${traceable}); + + ParsedArgs<${max_args}> parsed_args; + auto _r = parser.parse(${self_}, args, kwargs, parsed_args); + ${check_has_torch_function} + ${dispatch} + ${method_footer} +} + +""" +) + +# python binding for a method with no args, shortcuts parsing +PY_VARIABLE_METHOD_NOARGS = CodeTemplate( + """\ +// ${name} +static PyObject * ${pycname}(PyObject* self_, PyObject* args) +{ + ${method_header} + ${check_has_torch_function} + ${dispatch} + ${method_footer} +} + +""" +) + + +def method_impl( + name: BaseOperatorName, + module: Optional[str], + overloads: Sequence[PythonSignatureNativeFunctionPair], + *, + method: bool, + symint: bool = True, +) -> str: + """ + Generate a python binding for all overloads of an op. 
+ """ + pycname = get_pycname(name) + noarg = is_noarg(overloads) + namedtuple_inits, namedtuple_typenames = emit_namedtuple_call(overloads) + + method_header = ["HANDLE_TH_ERRORS"] + method_header += namedtuple_inits + method_header += ( + ["const Tensor& self = THPVariable_Unpack(self_);"] if method else [] + ) + + method_footer = ([] if noarg else ["Py_RETURN_NONE;"]) + ["END_HANDLE_TH_ERRORS"] + + traceable = "true" if all(should_trace(o.function) for o in overloads) else "false" + + grouped_overloads: Sequence[PythonSignatureGroup] = group_overloads( + overloads, symint=symint + ) + is_singleton = len(grouped_overloads) == 1 + signatures: List[str] = [] + dispatch: List[str] = [] + for overload_index, overload in enumerate(grouped_overloads): + signature = overload.signature.signature_str(symint=symint) + signatures.append(f"{cpp_string(str(signature))},") + dispatch_body = emit_dispatch_case( + overload, namedtuple_typenames, symint=symint + ) + dispatch.append( + PY_VARIABLE_CASE.substitute( + overload_index=overload_index, body=dispatch_body + ) + if not is_singleton + else dispatch_body + ) + + if noarg: + template = PY_VARIABLE_METHOD_NOARGS + elif is_singleton: + template = PY_VARIABLE_METHOD_VARARGS_SINGLETON + else: + template = PY_VARIABLE_METHOD_VARARGS + + return template.substitute( + name=name, + pycname=pycname, + method_header=method_header, + max_args=max(o.signature.arguments_count() for o in overloads), + signatures=signatures, + traceable=traceable, + check_has_torch_function=gen_has_torch_function_check( + name=name, + module=module, + noarg=noarg, + method=method, + ), + dispatch=dispatch, + method_footer=method_footer, + self_="self_" if method else "nullptr", + ) + + +def gen_has_torch_function_check( + name: BaseOperatorName, module: Optional[str], *, noarg: bool, method: bool +) -> str: + if noarg: + if method: + return f"""\ +if(check_has_torch_function(self_)) {{ + return handle_torch_function(self_, "{name}"); +}} +""" + else: + return "" + + self_ = "self_" if method else "nullptr" + namespace = ( + { + "torch": "THPVariableFunctionsModule", + "torch.nn": "THPNNVariableFunctionsModule", + "torch.fft": "THPFFTVariableFunctionsModule", + "torch.linalg": "THPLinalgVariableFunctionsModule", + "torch.nested": "THPNestedVariableFunctionsModule", + "torch.sparse": "THPSparseVariableFunctionsModule", + "torch.special": "THPSpecialVariableFunctionsModule", + }[module] + if module + else "THPVariableClass" + ) + + return f"""\ +if(_r.has_torch_function()) {{ + return handle_torch_function(_r, {self_}, args, kwargs, {namespace}, "{module or "torch.Tensor"}"); +}} +""" + + +# handler for output/no-output overload pair +PY_VARIABLE_OUT = CodeTemplate( + """\ +if (_r.isNone(${out_idx})) { + ${call_dispatch} +} else { + ${call_dispatch_out} +} +""" +) + + +def emit_dispatch_case( + overload: PythonSignatureGroup, + namedtuple_typenames: Dict[str, str], + *, + symint: bool = True, +) -> str: + """ + Emit dispatch code for a single parsed signature. This corresponds to either + a single native function, or a pair that differ only in output params. In the + latter case, a single python signature is used for both and dispatching + switches on the presence/absence of passed output args. 
+ """ + if overload.outplace is not None: + # dispatch output and no-output variants, branch on _r.isNone() + return PY_VARIABLE_OUT.substitute( + out_idx=overload.signature.output_idx(), + call_dispatch=emit_single_dispatch( + overload.signature, overload.base, namedtuple_typenames, symint=symint + ), + call_dispatch_out=emit_single_dispatch( + overload.signature, + overload.outplace, + namedtuple_typenames, + symint=symint, + ), + ) + else: + # no-output version only + return emit_single_dispatch( + overload.signature, overload.base, namedtuple_typenames, symint=symint + ) + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# +# Forward Declarations Codegen +# +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def forward_decls( + name: BaseOperatorName, + overloads: Sequence[PythonSignatureNativeFunctionPair], + *, + method: bool, +) -> Tuple[str, ...]: + if method: + return () + + pycname = get_pycname(name) + if is_noarg(overloads): + return ( + f"""\ +static PyObject * {pycname}(PyObject* self_, PyObject* args); +""", + ) + else: + return ( + f"""\ +static PyObject * {pycname}(PyObject* self_, PyObject* args, PyObject* kwargs); +""", + ) + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# +# Method Def (Binding Table Entry) Codegen +# +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def method_def( + name: BaseOperatorName, + module: Optional[str], + overloads: Sequence[PythonSignatureNativeFunctionPair], + *, + method: bool, +) -> str: + """ + Generate method def entry. + """ + pycname = get_pycname(name) + + if name.dunder_method: + # PyMethodDef entry for binary op, throws not implemented error + pycname = f"TypeError_to_NotImplemented_<{pycname}>" + + if is_noarg(overloads): + flags = "METH_NOARGS" if method else "METH_VARARGS | METH_KEYWORDS" + else: + pycname = f"castPyCFunctionWithKeywords({pycname})" + flags = "METH_VARARGS | METH_KEYWORDS" + + if module == "torch": + flags += " | METH_STATIC" + + return f'{{"{name}", {pycname}, {flags}, NULL}},' + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# +# Overload Sorting and Grouping +# +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def group_overloads( + overloads: Sequence[PythonSignatureNativeFunctionPair], *, symint: bool = True +) -> Sequence[PythonSignatureGroup]: + bases: Dict[str, PythonSignatureNativeFunctionPair] = {} + outplaces: Dict[str, PythonSignatureNativeFunctionPair] = {} + + # first group by signature ignoring out arguments + for overload in overloads: + sig = overload.signature.signature_str(skip_outputs=True, symint=symint) + if overload.function.func.is_out_fn(): + if sig in outplaces: + raise RuntimeError( + f"Found duplicated function definition:\n- {overload.function.func}.\n" + f"Existing definition:\n- {outplaces[sig].function.func}." + ) + outplaces[sig] = overload + else: + if sig in bases: + raise RuntimeError( + f"Found duplicated function definition:\n- {overload.function.func}.\n" + f"Existing definition:\n- {bases[sig].function.func}." 
+ ) + bases[sig] = overload + + for sig, out in outplaces.items(): + if sig not in bases: + candidates: List[str] = [] + for overload in overloads: + if ( + str(overload.function.func.name.name) + == str(out.function.func.name.name) + and not overload.function.func.is_out_fn() + and not overload.signature.deprecated + ): + candidates.append( + overload.signature.signature_str( + skip_outputs=True, symint=symint + ) + ) + out_sig = out.signature.signature_str(symint=symint) + raise RuntimeError( + f"While identifying overloads, we found an out schema {out_sig} without a corresponding non-out variant. " + f"We expected the non-out variant to have schema: \n- {sig}\nPlease check that you spelled the schema " + "correctly in native_functions.yaml. We discovered the following candidate(s): \n" + + "\n".join(f"- {candidate}" for candidate in candidates) + ) + + grouped = [ + PythonSignatureGroup.from_pairs( + functional=base, + out=outplaces.get(sig), + ) + for sig, base in bases.items() + ] + return sort_overloads(grouped, symint=symint) + + +# This function declares a partial order on declarations, and sorts them according +# to its linear extension. This is necessary, because there's some ambiguity in the +# choice of overload, and we want a different order. +# +# See Note[Order of overloads matters] +# +# A few examples of ambiguous python signature pairs. +# +# All parameters have the same type, except one taking Tensor the other taking +# Scalar. A numeric PyObject can be casted into Tensor, and a zero-dim Tensor +# object can be accepted as Scalar type parameter (see python_arg_parser.cpp). +# Therefore, same input arguments might be accepted by either python signature. +# We want to always parse the one taking Tensor first. +# +# bitwise_and(Tensor input, Tensor other, *, Tensor out=None) +# bitwise_and(Tensor input, Scalar other, *, Tensor out=None) +# +# If they have different number of parameters then they are not ambiguous - but +# the difference on output param can be ignored as it's optional. +# +# multiply(Tensor input, Tensor other, *, Tensor out=None) +# multiply(Tensor input, Scalar other) +# +# Both positional args and keyword-only args are considered together. +# +# subtract(Tensor other, *, Scalar alpha=1) +# subtract(Scalar other, Scalar alpha=1) +# +# A few ambiguous cases which it does NOT handle yet. +# +# If there is any difference in other parameters besides the Tensor/Scalar +# difference, then they are not considered ambiguous by this method anymore. +# However, the difference could be too trivial to disambiguate. +# +# foo(Tensor input, Scalar other, Scalar bar) +# foo(Tensor input, Tensor other, double bar) +# +# If they are taking different number of parameters then they are not considered +# ambiguous anymore, even if the difference is only on optional kwargs. +# +# foo(Scalar other, Scalar alpha=1) +# foo(Tensor other, *, Scalar alpha=1, Scalar beta=1) +# + + +def sort_overloads( + grouped_overloads: Sequence[PythonSignatureGroup], *, symint: bool = True +) -> Sequence[PythonSignatureGroup]: + # NB: Smaller here means lower priority + + def is_arg_smaller(t1: Type, t2: Type) -> bool: + return ( + str(t1) == "Scalar" + and str(t2) == "Tensor" + or str(t1) == "Scalar?" + and str(t2) == "Tensor?" + or "Dimname" in str(t1) + and "Dimname" not in str(t2) + or + # In the discussion https://github.com/pytorch/pytorch/issues/54555 it has been + # discussed why it is important to prioritize int/int? 
over int[] + str(t1) == "int[]" + and (str(t2) == "int" or str(t2) == "int?") + or + # TensorList currently throws an error during argument parsing, that's why it needs to be + # last in signature ordering. See discussion: https://github.com/pytorch/pytorch/issues/58087 + str(t1) == "Tensor[]" + and str(t2).find("[]") != -1 + or + # Prioritize IntArrayRef overload over SymIntArrayRef + str(t1) == "SymInt[]" + and str(t2) == "int[]" + or + # Make sure both in, SymInt are sorted consistently w.r.t. Tensor since Tensor can be implicitly + # converted to either int or SymInt. Prioritize the Tensor overload since it otherwise gets shadowed. + (str(t1) == "SymInt" or str(t1) == "int") + and str(t2) == "Tensor" + ) + + def is_smaller(s1: PythonSignature, s2: PythonSignature) -> bool: + """Returns True if s1 < s2 in the partial order.""" + args1, args2 = s1.arguments(skip_outputs=True), s2.arguments(skip_outputs=True) + if len(args1) != len(args2): + return False + # TODO: should use some canonical form instead of 'str(arg.type)' - see comments + # above. The old codegen used the deprecated 'dynamic_type(arg.type)', which + # ignores the optional annotation, i.e. 'Scalar' and 'Scalar?'. + equal = all(arg1.type == arg2.type for arg1, arg2 in zip(args1, args2)) + smaller_or_equal = all( + str(arg1.type) == str(arg2.type) or is_arg_smaller(arg1.type, arg2.type) + for arg1, arg2 in zip(args1, args2) + ) + return smaller_or_equal and not equal + + # First sort by signature + grouped_overloads = sorted( + grouped_overloads, key=lambda x: x.signature.signature_str(symint=symint) + ) + + # Construct the relation graph + larger_than: Dict[int, Set[int]] = defaultdict(set) + for i1, overload1 in enumerate(grouped_overloads): + for i2, overload2 in enumerate(grouped_overloads): + if is_smaller(overload1.signature, overload2.signature): + larger_than[i1].add(i2) + + if not larger_than: + return list(grouped_overloads) + + # Use a topological sort to sort overloads according to the partial order. + N = len(grouped_overloads) + sorted_ids: List[int] = list(filter(lambda x: x not in larger_than, range(N))) + + for idx in range(N): + # The size of sorted_ids will grow to N eventually. + i = sorted_ids[idx] + for j in sorted(larger_than.keys()): + larger = larger_than[j] + larger.discard(i) + if not larger: + del larger_than[j] + sorted_ids.append(j) + + return [grouped_overloads[x] for x in sorted_ids] + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# +# Codegen API Integration +# +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def emit_single_dispatch( + ps: PythonSignature, + f: NativeFunction, + namedtuple_typenames: Dict[str, str], + *, + symint: bool = True, +) -> str: + """ + Emit dispatch code for a single native function. 
+ """ + + @with_native_function + def go(f: NativeFunction) -> str: + # header comments + if isinstance(ps, PythonSignatureDeprecated): + schema_comment = f"// [deprecated] aten::{ps.deprecated_schema}" + else: + schema_comment = f"// aten::{f.func}" + + deprecated = "[deprecated] " if ps.deprecated else "" + + # dispatch lambda signature + name = cpp.name(f.func) + lambda_formals = ", ".join( + f"{a.type_str} {a.name}" for a in dispatch_lambda_args(ps, f, symint=symint) + ) + lambda_return = dispatch_lambda_return_str(f) + + # dispatch lambda body + dispatch_callee = cpp_dispatch_target(f) + dispatch_args = ", ".join(cpp_dispatch_exprs(f, python_signature=ps)) + + # from arg parser outputs to dispatch lambda arguments + parser_outputs = arg_parser_output_exprs(ps, f, symint=symint) + lambda_arg_exprs = dispatch_lambda_exprs(ps, f, symint=symint) + inits = "\n".join(lambda_arg_exprs.inits) + lambda_args = ", ".join(lambda_arg_exprs.exprs) + + # scatter fields + # TODO: Checking `ps.method and ('requires_grad' in parser_outputs)` is a hacky + # solution for enabling the 'requires_grad' argument for tensor methods + # new_full, new_empty, and new_zeros. A much better but more difficult to + # implement solution involves refactoring according to Ed's description here: + # https://github.com/pytorch/pytorch/issues/36455#issuecomment-614767589 + need_set_requires_grad = ps.tensor_options_args and ( + not has_tensor_options(f) + or (ps.method and ("requires_grad" in parser_outputs)) + ) + set_requires_grad = ( + f'.set_requires_grad({parser_outputs["requires_grad"].expr})' + if need_set_requires_grad + else "" + ) + + if lambda_return == "void": + return f"""\ +{schema_comment} +{inits} +auto dispatch_{name} = []({lambda_formals}) -> {lambda_return} {{ + pybind11::gil_scoped_release no_gil; + {dispatch_callee}({dispatch_args}); +}}; +dispatch_{name}({lambda_args}){set_requires_grad}; +Py_RETURN_NONE; +""" + else: + typename = namedtuple_typenames.get(gen_namedtuple_typename_key(f)) + namedtuple_typeref = f"{typename}, " if typename is not None else "" + return f"""\ +{schema_comment} +{inits} +auto dispatch_{name} = []({lambda_formals}) -> {lambda_return} {{ + pybind11::gil_scoped_release no_gil; + return {dispatch_callee}({dispatch_args}); +}}; +return wrap({namedtuple_typeref}dispatch_{name}({lambda_args}){set_requires_grad}); +""" + + return go(f) diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_variable_factories.py b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_variable_factories.py new file mode 100644 index 0000000000000000000000000000000000000000..d7cf4a342328db5c8cd83bf581eb9c5c21278cb0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_variable_factories.py @@ -0,0 +1,115 @@ +# Generates C++ functions that wrap ATen tensor factory methods to turn them into Variables. +# +# This writes one file: variable_factories.h + +import re +from typing import List, Optional + +import torchgen.api.python as python +from torchgen.api import cpp + +from torchgen.api.types import CppSignatureGroup +from torchgen.context import with_native_function +from torchgen.gen import parse_native_yaml +from torchgen.model import NativeFunction, TensorOptionsArguments, Variant +from torchgen.utils import FileManager, mapMaybe + +OPTIONAL_TYPE_PATTERN = re.compile(r"c10::optional<(.+)>") +TYPE_PATTERN = re.compile(r"(?:const\s+)?([A-Z]\w+)") + + +# Add 'at::' to types defined in ATen namespace, e.g. 
Tensor, TensorList, IntArrayRef and etc. +# TODO: maybe update the cpp argument API to take optional namespace argument? +def fully_qualified_type(argument_type: str) -> str: + def maybe_optional_type(type: str, is_opt: bool) -> str: + return f"c10::optional<{type}>" if is_opt else type + + opt_match = OPTIONAL_TYPE_PATTERN.match(argument_type) + is_opt = opt_match is not None + if opt_match: + argument_type = argument_type[opt_match.start(1) : opt_match.end(1)] + match = TYPE_PATTERN.match(argument_type) + if match is None: + return maybe_optional_type(argument_type, is_opt) + index = match.start(1) + qualified_type = f"{argument_type[:index]}at::{argument_type[index:]}" + return maybe_optional_type(qualified_type, is_opt) + + +def gen_variable_factories( + out: str, native_yaml_path: str, tags_yaml_path: str, template_path: str +) -> None: + native_functions = parse_native_yaml( + native_yaml_path, tags_yaml_path + ).native_functions + factory_functions = [fn for fn in native_functions if is_factory_function(fn)] + fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False) + fm.write_with_template( + "variable_factories.h", + "variable_factories.h", + lambda: { + "generated_comment": "@" + + f"generated from {fm.template_dir_for_comments()}/variable_factories.h", + "ops_headers": [ + f"#include " for fn in factory_functions + ], + "function_definitions": list(mapMaybe(process_function, factory_functions)), + }, + ) + + +@with_native_function +def is_factory_function(f: NativeFunction) -> bool: + if Variant.function not in f.variants: + return False + + name = cpp.name(f.func) + has_tensor_options = python.has_tensor_options(f) + return has_tensor_options or name.endswith("_like") + + +@with_native_function +def process_function(f: NativeFunction) -> Optional[str]: + name = cpp.name(f.func) + has_tensor_options = python.has_tensor_options(f) + is_factory = has_tensor_options or name.endswith("_like") + + if Variant.function not in f.variants or not is_factory: + return None + + cpp_sigs = CppSignatureGroup.from_native_function(f, method=False) + sigs = [cpp_sigs.signature] + if cpp_sigs.symint_signature is not None: + sigs.append(cpp_sigs.symint_signature) + r = "" + for sig in sigs: + formals: List[str] = [] + exprs: List[str] = [] + requires_grad = "false" + for arg in sig.arguments(): + qualified_type = fully_qualified_type(arg.type) + if arg.default: + formals.append(f"{qualified_type} {arg.name} = {arg.default}") + else: + formals.append(f"{qualified_type} {arg.name}") + + if isinstance(arg.argument, TensorOptionsArguments): + # note: we remove the requires_grad setting from the TensorOptions because + # it is ignored anyways (and we actually have an assertion that it isn't set + # which would fail otherwise). We handle requires_grad explicitly here + # instead of passing it through to the kernel. + exprs.append( + f"at::TensorOptions({arg.name}).requires_grad(c10::nullopt)" + ) + # Manually set the requires_grad bit on the result tensor. 
+ requires_grad = f"{arg.name}.requires_grad()" + else: + exprs.append(arg.name) + + r += f"""\ +inline at::Tensor {sig.name()}({', '.join(formals)}) {{ + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::{sig.name()}({', '.join(exprs)}), /*requires_grad=*/{requires_grad}); +}} +""" + return r diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_variable_type.py b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_variable_type.py new file mode 100644 index 0000000000000000000000000000000000000000..09455318a983e34e415846e4b1424b4912f3ff94 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_variable_type.py @@ -0,0 +1,2164 @@ +# Generates VariableType.h/cpp +# +# **If any changes are being made to the VariableType codegen please also check +# if updates are needed in torch/csrc/autograd/autograd_not_implemented_fallback.cpp +# +# VariableType is a subclass of at::Type that provides the binding code +# necessary to provide a differentiable version of ATen operators. There are a +# number of different things we could mean: +# +# - Given a non-differentiable forward implementation, we might +# directly associate it with a backward implementation to make +# it differentiable. This is the common case. +# +# - Some functions don't need a backwards implementation, because +# backpropagation will never propagate beyond them. There are a +# number of different reasons why this may be the case: +# +# - The function has no differentiable inputs +# - The function's output is not differentiable +# - The function has no data dependency on its input +# +# - Some function don't need a backwards implementation because they +# are implemented as a composition of other (differentiable) ATen +# functions. These are dispatched directly to the Type superclass, +# which will in turn dispatch back to VariableType for its +# differentiable subcomponents. 
+# +import re +from typing import Callable, Dict, List, Optional, Sequence, Set, Tuple, Union + +from torchgen.api import cpp +from torchgen.api.autograd import ( + DifferentiableInput, + dispatch_strategy, + ForwardDerivative, + gen_differentiable_outputs, + is_differentiable, + NativeFunctionWithDifferentiabilityInfo, + SavedAttribute, +) + +from torchgen.api.types import ( + ArrayRefCType, + BaseCppType, + BaseCType, + Binding, + DispatcherSignature, + intArrayRefT, + iTensorListRefT, + ListCType, + MutRefCType, + OptionalCType, + scalarT, + SpecialArgName, + stringT, + symIntArrayRefT, + TENSOR_LIST_LIKE_CTYPES, + tensorListT, + tensorT, + TupleCType, + VectorCType, +) +from torchgen.code_template import CodeTemplate +from torchgen.context import ( + native_function_manager, + with_native_function, + with_native_function_and, +) +from torchgen.model import ( + Argument, + BaseType, + ListType, + NativeFunction, + SchemaKind, + SelfArgument, + TensorOptionsArguments, +) +from torchgen.utils import FileManager, mapMaybe + +from .context import with_native_function_with_differentiability_info_and_key +from .gen_inplace_or_view_type import ( + ALL_VIEW_FUNCTIONS, + ASSIGN_RETURN_VALUE, + AUTOGRAD_NOT_IMPLEMENTED_REGISTRATION, + gen_formals, + get_base_name, + get_view_info, + is_tensor_list_type, + is_tensor_type, + METHOD_DEFINITION, + modifies_arguments, + TMP_VAR, + unpack_args, + unpacked_name, + use_derived, + WRAPPER_REGISTRATION, +) +from .gen_trace_type import ( + declare_returned_variables, + get_return_value, + MANUAL_AUTOGRAD_AND_TRACER, + MANUAL_BACKEND, + tie_return_values, + type_wrapper_name, +) + +# We don't set or modify grad_fn on these methods. Generally, they return +# tensors that have requires_grad=False. In-place functions listed here will +# not examine or modify requires_grad or grad_fn. +# NB: this does NOT include overload name +DONT_REQUIRE_DERIVATIVE = { + # These only depend on the input Tensor's shape and device, not the data + "empty_like", + "ones_like", + "full_like", + "zeros_like", + "rand_like", + "randn_like", + "new_empty", + "new_empty_strided", + "new_full", + "new_zeros", + "new_ones", + # These are only implemented on integral types + "__and__", + "__iand__", + "__ilshift__", + "__ior__", + "__irshift__", + "__ixor__", + "__lshift__", + "__or__", + "__rshift__", + "__xor__", + # These work on integral data types, and hence don't require derivative + "_sobol_engine_draw", + "_sobol_engine_ff", + "_sobol_engine_scramble_", + "_sobol_engine_initialize_state_", + # This is an unsafe method that is meant to be out of reach of autograd. + "_coalesced_", + # Quantize functions should not record gradients + "quantize_per_tensor", + "quantize_per_channel", + # Functions that return integers should not have output that require gradients + "argmax", + "argmin", + "argsort", + "searchsorted", + "bucketize", + # Functions that return booleans are not differentiable + "isnan", + "isposinf", + "isneginf", + "isinf", + "signbit", + "isin", + "allclose", + # Functions return none are not differentiable + "record_stream", + # These functions are not differentiable + "logical_and", + "logical_xor", + "logical_not", + "logical_or", + # This function returns nested_tensor shape as a tensor that is non-differentiable + "_nested_tensor_size", + "_nested_tensor_strides", + "_nested_tensor_storage_offsets", +} + +# The C -> R functions at the time of adding this are still being audited and tested +# but will not error out. 
+# C -> C, R -> C functions for which backward is correctly implemented and tested +GRADIENT_IMPLEMENTED_FOR_COMPLEX = { + "fill", + "t", + "view", + "reshape", + "reshape_as", + "view_as", + "roll", + "clone", + "block_diag", + "diag_embed", + "repeat", + "expand", + "flip", + "fliplr", + "flipud", + "rot90", + "nanmean", + "nansum", + "transpose", + "permute", + "squeeze", + "unsqueeze", + "resize", + "resize_as", + "tril", + "triu", + "chunk", + "zero_", + "eq_", + "ne_", + "add", + "__radd__", + "sum", + "_conj", + "sin", + "cos", + "mul", + "sinc", + "sinh", + "cosh", + "__rmul__", + "sgn", + "asin", + "acos", + "sub", + "div", + "cat", + "view_as_complex", + "index_put", + "neg", + "complex", + "select", + "where", + "as_strided", + "as_strided_scatter", + "slice", + "constant_pad_nd", + "unbind", + "split", + "split_with_sizes", + "unsafe_split", + "split_with_sizes_backward", + "dot", + "vdot", + "cholesky", + "triangular_solve", + "mm", + "_unsafe_view", + "mv", + "outer", + "bmm", + "diagonal", + "alias", + "atan", + "log", + "log10", + "log1p", + "log2", + "logaddexp", + "logcumsumexp", + "reciprocal", + "tan", + "pow", + "rsqrt", + "tanh", + "tanh_backward", + "asinh", + "acosh", + "atanh", + "take", + "fill_", + "exp", + "exp2", + "expm1", + "nonzero", + "mean", + "std_mean", + "var_mean", + "inverse", + "solve", + "linalg_cholesky", + "addcmul", + "addcdiv", + "matrix_exp", + "linalg_matrix_exp", + "_linalg_eigh", + "cholesky_solve", + "linalg_qr", + "_linalg_svd", + "_fft_c2c", + "_fft_r2c", + "linalg_solve", + "sqrt", + "stack", + "gather", + "index_select", + "index_add_", + "linalg_inv", + "linalg_inv_ex", + "baddbmm", + "addbmm", + "addmm", + "addmv", + "addr", + "linalg_householder_product", + "ormqr", + "reflection_pad1d", + "reflection_pad2d", + "reflection_pad3d", + "linalg_cholesky_ex", + "linalg_eig", + "diagonal_copy", + "diagonal_scatter", + "select_backward", + "diagonal_backward", + "slice_backward", + "reflection_pad1d_backward", + "reflection_pad2d_backward", + "reflection_pad3d_backward", + "_sparse_sparse_matmul", + "replication_pad1d", + "replication_pad2d", + "replication_pad3d", + "put", + "put_", + "_to_copy", + "replication_pad1d_backward", + "replication_pad2d_backward", + "replication_pad3d_backward", + "diag", + "masked_scatter", + "masked_select", + "index_add", + "index_fill", + "trace", + "polar", + "cumsum", + "rsub", + "eig", + "lerp", + "linalg_vector_norm", + "cumprod", + "prod", + "index_copy", + "lu", + "unfold", + "unfold_backward", + "index", + "masked_fill", + "masked_scatter_backward", + "linalg_cross", + "lu_unpack", + "renorm", + "_conj_physical", + "linalg_lu_factor_ex", + "scatter", + "scatter_add", + "sigmoid", + "sigmoid_backward", + "sparse_mask", + "trapezoid", + "cumulative_trapezoid", + "conj_physical_", + "_neg_view", + "_reshape_alias", + "_reshape_copy", + "_linalg_det", + "lu_solve", + "linalg_solve_triangular", + "linalg_pinv", + "linalg_lstsq", + "unfold_copy", + "col2im", + "im2col", + "cholesky_inverse", + "to_sparse", + "sparse_sampled_addmm", + "linalg_lu", + "pixel_shuffle", + "pixel_unshuffle", + "linalg_lu_solve", + "_linalg_slogdet", + "_linalg_solve_ex", +} + +GRADIENT_IMPLEMENTED_FOR_SPARSE_COMPLEX = { + "_to_dense", + "_coalesce", + "coalesce", + "values", + "_sparse_coo_tensor_with_dims_and_tensors", + "_sparse_addmm", +} + +GRADIENT_IMPLEMENTED_FOR_COMPLEX.update(GRADIENT_IMPLEMENTED_FOR_SPARSE_COMPLEX) + +# Some operators invalidate the grad_accumulator. Let's reset it. 
+RESET_GRAD_ACCUMULATOR = {"set_", "resize_"} + +# NOTE [ TensorImpl and Storage Pointer Sanity Checks ] +# +# We check the following properties: +# 1) A function should never change the input tensors' underlying c10::TensorImpl +# pointers or c10::Storage pointers, even if it modifies its input tensors (via +# inplace or out-variants) +# If the function does not modify its arguments, we also check the following properties +# pertaining to its output: +# 2) Its TensorImpl has use_count of 1 +# 3) If the function is a view function, it has the same StorageImpl as that of +# the input it is aliased with. Otherwise, its StorageImpl has use_count of 1 +# +# The following code templates implement the checks for this invariant: +SAVE_TENSOR_STORAGE = CodeTemplate( + """\ +c10::optional ${tensor_name}_storage_saved = + ${tensor_name}.has_storage() ? c10::optional(${tensor_name}.storage()) : c10::nullopt; +""" +) + + +# If tensor_name == out_tensor_name, used to enforce (1), otherwise used for (2) +ENFORCE_SAME_TENSOR_STORAGE = CodeTemplate( + """\ +if (${tensor_name}_storage_saved.has_value() && + !at::impl::dispatch_mode_enabled() && + !at::impl::tensor_has_dispatch(${tensor_name})) + TORCH_INTERNAL_ASSERT(${tensor_name}_storage_saved.value().is_alias_of(${out_tensor_name}.storage())); +""" +) + +SAVE_TENSORLIST_STORAGE = CodeTemplate( + """\ +std::vector> ${tensorlist_name}_storage_saved(${tensorlist_name}.size()); +for (const Tensor& tensor : ${tensorlist_name}) + ${tensorlist_name}_storage_saved.push_back( + tensor.has_storage() ? c10::optional(tensor.storage()) : c10::nullopt); +""" +) + +ENFORCE_SAME_TENSORLIST_STORAGE = CodeTemplate( + """\ +for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) { + if (${tensorlist_name}_storage_saved[i].has_value() && !at::impl::tensorlist_has_dispatch(${tensorlist_name})) + TORCH_INTERNAL_ASSERT(${tensorlist_name}_storage_saved[i].value().is_alias_of(${tensorlist_name}[i].storage())); +} +""" +) + +SAVE_OPTIONALTENSORLIST_STORAGE = CodeTemplate( + """\ +std::vector> ${tensorlist_name}_storage_saved(${tensorlist_name}.size()); +for (const c10::optional& tensor : ${tensorlist_name}) + ${tensorlist_name}_storage_saved.push_back( + tensor.has_value() && tensor->has_storage() ? 
c10::optional(tensor->storage()) : c10::nullopt); +""" +) + +ENFORCE_SAME_OPTIONALTENSORLIST_STORAGE = CodeTemplate( + """\ +for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) { + if (${tensorlist_name}_storage_saved[i].has_value() && !at::impl::tensorlist_has_dispatch(${tensorlist_name})) + TORCH_INTERNAL_ASSERT(${tensorlist_name}_storage_saved[i].value().is_alias_of( + static_cast>(${tensorlist_name}[i])->storage())); +} +""" +) + +SAVE_TENSOR_IMPL = CodeTemplate( + """\ +c10::intrusive_ptr ${tensor_name}_impl_saved; +if (${tensor_name}.defined()) ${tensor_name}_impl_saved = ${tensor_name}.getIntrusivePtr(); +""" +) + +ENFORCE_SAME_TENSOR_IMPL = CodeTemplate( + """\ +if (${tensor_name}_impl_saved && !at::impl::dispatch_mode_enabled() && !at::impl::tensor_has_dispatch(${tensor_name})) + TORCH_INTERNAL_ASSERT(${tensor_name}_impl_saved == ${tensor_name}.getIntrusivePtr()); +""" +) + +ENFORCE_TENSOR_IMPL_USE_COUNT_LT_OR_EQ_ONE = CodeTemplate( + """\ +if (!at::impl::dispatch_mode_enabled() && !at::impl::tensor_has_dispatch(${tensor_name})) + TORCH_INTERNAL_ASSERT(${tensor_name}.use_count() <= 1, "function: ${fn_name}"); +""" +) + +ENFORCE_TENSOR_STORAGE_USE_COUNT_EQUALS_ONE = CodeTemplate( + """\ +if (${tensor_name}.has_storage() && !at::impl::dispatch_mode_enabled() && !at::impl::tensor_has_dispatch(${tensor_name})) { + TORCH_INTERNAL_ASSERT(${tensor_name}.storage().use_count() == 1, "function: ${fn_name}"); +} +""" +) + +SAVE_TENSORLIST_IMPL = CodeTemplate( + """\ +std::vector> ${tensorlist_name}_impl_saved(${tensorlist_name}.size()); +for (size_t i=0; i<${tensorlist_name}.size(); i++) + if (${tensorlist_name}[i].defined()) ${tensorlist_name}_impl_saved[i] = ${tensorlist_name}[i].getIntrusivePtr(); +""" +) + +ENFORCE_SAME_TENSORLIST_IMPL = CodeTemplate( + """\ +for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) { + if (${tensorlist_name}_impl_saved[i] && !at::impl::tensorlist_has_dispatch(${tensorlist_name})) + TORCH_INTERNAL_ASSERT(${tensorlist_name}_impl_saved[i] == ${tensorlist_name}[i].getIntrusivePtr()); +} +""" +) + +SAVE_OPTIONALTENSORLIST_IMPL = CodeTemplate( + """\ +std::vector> ${tensorlist_name}_impl_saved(${tensorlist_name}.size()); +for (size_t i=0; i<${tensorlist_name}.size(); i++) { + c10::optional t = ${tensorlist_name}[i]; + if (t.has_value() && t->defined()) ${tensorlist_name}_impl_saved[i] = t->getIntrusivePtr(); +} +""" +) + +ENFORCE_SAME_OPTIONALTENSORLIST_IMPL = CodeTemplate( + """\ +for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) { + if (${tensorlist_name}_impl_saved[i]) + TORCH_INTERNAL_ASSERT( + ${tensorlist_name}_impl_saved[i] == static_cast>(${tensorlist_name}[i])->getIntrusivePtr()); +} +""" +) + +# The following list contains functions that we don't enforce the invariant on. 
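# A small illustration of how the check templates above are expanded; "self" and
# "add_" are example values for this sketch only, not tied to any particular
# operator. CodeTemplate.substitute simply fills the ${...} placeholders with the
# given strings. (The skip-lists for these checks follow right after this sketch.)
_example_impl_check = ENFORCE_SAME_TENSOR_IMPL.substitute(tensor_name="self")
_example_use_count_check = ENFORCE_TENSOR_IMPL_USE_COUNT_LT_OR_EQ_ONE.substitute(
    tensor_name="self", fn_name="add_"
)
# _example_impl_check now reads roughly:
#   if (self_impl_saved && !at::impl::dispatch_mode_enabled() && !at::impl::tensor_has_dispatch(self))
#     TORCH_INTERNAL_ASSERT(self_impl_saved == self.getIntrusivePtr());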
+DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE = { + # These functions are expected to change impl or storage of input tensors + "set_", + "_cudnn_rnn_flatten_weight", +} +DONT_ENFORCE_TENSOR_IMPL_USE_COUNT = { + # These non-inplace, non-out functions return tensors with use_count > 1 + # Therefore, they MAY (but not necessarily) return one of its inputs as-is + # See https://github.com/pytorch/pytorch/issues/60426 for more information + "_embedding_bag", + "_embedding_bag_forward_only", + "q_per_channel_scales", + "q_per_channel_zero_points", + "lu_unpack", + "_cudnn_rnn_backward", + # The below failed StorageImpl use_count check but we skip tensor_impl check + # just in case + "_cudnn_rnn", + "dequantize_self", + # lift() should never actually be called with a requires_grad=True tensor, + "lift", + "lift_fresh", + "lift_fresh_copy", + # Nested Tensors related functions + # _nested_tensor_size() should never actually be called with requires_grad=True tensor + "_nested_tensor_size", + "_nested_tensor_strides", + "_nested_tensor_storage_offsets", +} + +DONT_ENFORCE_STORAGE_IMPL_USE_COUNT = { + # These non-view functions return tensors with storage use_count != 1 + "_slow_conv2d_forward", + "slow_conv3d_forward", + "channel_shuffle", + # If an input is returned as-is in output, we cannot guarantee its storage_impl + # use count to be 1 either. + *DONT_ENFORCE_TENSOR_IMPL_USE_COUNT, +} +# END CHECKS FOR [ TensorImpl and Storage Pointer Sanity Checks ] + +DECLARE_GRAD_FN = CodeTemplate( + """\ +std::shared_ptr<${op}> grad_fn; +""" +) + +DECLARE_VECTOR_OF_GRAD_FN = CodeTemplate( + """\ +std::vector> grad_fns; +""" +) + +SETUP_ANY_REQUIRES_GRAD = CodeTemplate( + """\ +[[maybe_unused]] auto _any_requires_grad = compute_requires_grad( ${args_with_derivatives} ); +${extra_differentiability_conditions} +""" +) + +SETUP_DERIVATIVE = CodeTemplate( + """\ +if (_any_requires_grad) { + ${setup} +} +""" +) + +SETUP_NONE_REQUIRES_GRAD = CodeTemplate( + """\ +if (compute_requires_grad( ${args_to_check} )) { + throw_error_out_requires_grad("${base_name}"); +} +""" +) + +ASSIGN_GRAD_FN = CodeTemplate( + """\ +grad_fn = std::shared_ptr<${op}>(new ${op}(${op_ctor}), deleteNode); +grad_fn->set_next_edges(collect_next_edges( ${args_with_derivatives} )); +""" +) + +# note(crcrpar): `compute_requires_grad` in the template below is supplied with arguments indexed with `i` +# while the `SETUP_ANY_REQUIRES_GRAD` above takes whole tensors and scalars. +ASSIGN_VECTOR_OF_GRAD_FN = CodeTemplate( + """\ +for (const auto& i : c10::irange( ${irange} )) { + const auto ith_requires_grad = compute_requires_grad(${args_with_derivatives}); + check_inplace(self[i], ith_requires_grad); + grad_fns.push_back([&]() -> std::shared_ptr<${op}> { + if (!ith_requires_grad) { + return nullptr; + } else { + auto grad_fn = std::shared_ptr<${op}>(new ${op}(${op_ctor}), deleteNode); + grad_fn->set_next_edges(collect_next_edges( ${args_with_derivatives} )); + return grad_fn; + } + }()); +} +""" +) + +CALL_REDISPATCH = CodeTemplate( + """\ +at::redispatch::${api_name}(${unpacked_args})""" +) +# If the non-variable operation has return values, we use the `tmp` variable to hold the +# values temporarily and pass the values to the return variables outside of the +# `at::AutoDispatchBelowAutograd` guard block. 
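# A minimal sketch of the `tmp` pattern just described, using example values
# ("add", "ks", "self_", "other_") that are not tied to a real operator; the
# actual templates used by the codegen follow below.
_example_tmp_dispatch = CodeTemplate(
    """\
auto ${tmp_var} = ([&]() {
  ${guard}
  return ${base_type_call};
})();
"""
).substitute(
    tmp_var="tmp",
    guard="at::AutoDispatchBelowAutograd guard;",
    base_type_call=CALL_REDISPATCH.substitute(
        api_name="add", unpacked_args=["ks", "self_", "other_"]
    ),
)
# The lambda performs the redispatch while the guard is active; `tmp` is then
# unpacked into the return variables once the guard has been popped.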
+DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES_JVP_DECOMP = CodeTemplate( + """\ +auto ${tmp_var} = ([&]() { + if (${any_has_forward_grad}) { + static c10::OperatorName full_name("aten::${op_name}", "${op_overload}"); + static c10::optional opt_op = c10::Dispatcher::singleton().findSchema(full_name); + return impl::run_jit_decomposition_with_args_for_jvp<${return_types}>("${op_name}", *opt_op, ks, ${arg_names}); + } else { + ${guard} + return ${base_type_call}; + } +})(); +""" +) + +DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES = CodeTemplate( + """\ +auto ${tmp_var} = ([&]() { + ${guard} + return ${base_type_call}; +})(); +""" +) + +DISPATCH_TO_NON_VAR_TYPE_WITHOUT_RETURN_VALUES = CodeTemplate( + """\ +{ + ${guard} + ${base_type_call}; +} +""" +) + +SET_HISTORY = CodeTemplate( + """\ +if (grad_fn) { + ${fn}_history(${differentiable_outputs}, grad_fn); +} +""" +) + +LOOP_OVER_VECTOR_OF_GRAD_FNS = CodeTemplate( + """\ +if (!grad_fns.empty()) { + ${preamble} + for (const auto& i : c10::irange(grad_fns.size())) { + auto grad_fn = grad_fns[i]; + if (grad_fn != nullptr) { + ${statements} + } + } +} +""" +) + +CONDITIONAL = CodeTemplate( + """\ +if (${cond}) { + ${statements} +} +""" +) + +RUN_ONLY_IN_DEBUG_MODE = CodeTemplate( + """\ +#ifndef NDEBUG +${statements} +#endif +""" +) + +FW_DERIVATIVE_CHECK_TEMPLATE = CodeTemplate( + """\ +isFwGradDefined(${req_inp})\ +""" +) +FW_DERIVATIVE_SIZE_CHECK_TEMPLATE = CodeTemplate( + """\ +TORCH_CHECK( + self.size() == ${inp_name}.size(), + "Tensor lists must have the same number of tensors, got ", + self.size(), + " and ", + ${inp_name}.size()); +""" +) + +FW_DERIVATIVE_TENSORLIST_CHECK_TEMPLATE = CodeTemplate( + """\ +isFwGradDefinedTensorList(${req_inp})\ +""" +) + +FW_DERIVATIVE_DEFINED_GRAD_TEMPLATE = CodeTemplate( + """\ +auto ${inp_name}_t_raw = toNonOptFwGrad(${inp}); +auto ${inp_name}_tensor = toNonOptTensor(${inp}); +auto ${inp_name}_t = (${inp_name}_t_raw.defined() || !${inp_name}_tensor.defined()) + ? ${inp_name}_t_raw : at::${zeros_fn}(${inp_name}_tensor.sizes(), ${inp_name}_tensor.options()); +""" +) + +FW_DERIVATIVE_DEFINED_PRIMAL_TEMPLATE = CodeTemplate( + """\ +auto ${inp_name}_p = toNonOptPrimal(${inp}); +""" +) + +FW_DERIVATIVE_SETTER_TENSOR = CodeTemplate( + """\ +if (${out_arg}_new_fw_grad_opt.has_value() && ${out_arg}_new_fw_grad_opt.value().defined() && ${out_arg}.defined()) { + // The hardcoded 0 here will need to be updated once we support multiple levels. + ${out_arg}._set_fw_grad(${out_arg}_new_fw_grad_opt.value(), /* level */ 0, /* is_inplace_op */ ${is_inplace}); +} +""" +) + +FW_DERIVATIVE_SETTER_TENSOR_FOREACH = CodeTemplate( + """\ +for (const auto& i : c10::irange(${out_arg}_new_fw_grad_opts.size())) { + auto& ${out_arg}_new_fw_grad_opt = ${out_arg}_new_fw_grad_opts[i]; + if (${out_arg}_new_fw_grad_opt.has_value() && ${out_arg}_new_fw_grad_opt.value().defined() && ${out_arg}[i].defined()) { + // The hardcoded 0 here will need to be updated once we support multiple levels. 
+ ${out_arg}[i]._set_fw_grad(${out_arg}_new_fw_grad_opt.value(), /* level */ 0, /* is_inplace_op */ ${is_inplace}); + } +} +""" +) + +FW_DERIVATIVE_SETTER_MULTI_OUTPUT = CodeTemplate( + """\ +if (${all_res}_new_fw_grad_opt.has_value() && std::get<${idx}>(${all_res}_new_fw_grad_opt.value()).defined() + && ${out_arg}.defined()) { + ${out_arg}._set_fw_grad(std::get<${idx}>(${all_res}_new_fw_grad_opt.value()), /* level */ 0, /* is_inplace_op */ false); +} +""" +) + +FW_DERIVATIVE_SETTER_TENSOR_LIST = CodeTemplate( + """\ +if (${out_arg}_new_fw_grad_opt.has_value()) { + auto ${out_arg}_new_fw_grad = ${out_arg}_new_fw_grad_opt.value(); + TORCH_INTERNAL_ASSERT(${out_arg}.size() == ${out_arg}_new_fw_grad.size()); + for (const auto i : c10::irange(${out_arg}.size())) { + if (${out_arg}_new_fw_grad[i].defined() && ${out_arg}[i].defined()) { + // The hardcoded 0 here will need to be updated once we support multiple levels. + ${out_arg}[i]._set_fw_grad(${out_arg}_new_fw_grad[i], /* level */ 0, /* is_inplace_op */ ${is_inplace}); + } + } +} +""" +) + +FW_DERIVATIVE_TEMPLATE = CodeTemplate( + """\ +${fw_grad_opt_definition} +if (${requires_fw_grad}) { + ${unpacked_arguments} + ${out_arg}_new_fw_grad_opt = ${formula}; +} +""" +) + +FW_DERIVATIVE_FOREACH_TEMPLATE = CodeTemplate( + """\ +${fw_grad_opt_definition} +for (const auto& i : c10::irange(${vector_of_optional_tensor}.size())) { + if (${any_has_forward_grad_for_current_index}) { + ${unpacked_arguments} + ${vector_of_optional_tensor}[i] = ${formula}; + } +} +""" +) + +FW_DERIVATIVE_FORBID_TEMPLATE = CodeTemplate( + """\ +TORCH_CHECK_NOT_IMPLEMENTED(!(${cond}), "Trying to use forward AD with ${name} that does not support it ${msg}"); +""" +) + +FW_DERIVATIVE_FORBID_LIST_TEMPLATE = CodeTemplate( + """\ +for (const auto& _t: ${arg}) { + TORCH_CHECK_NOT_IMPLEMENTED(!(${cond}), "Trying to use forward AD with ${name} that does not support it ${msg}"); +} +""" +) + + +def gen_variable_type( + out: str, + native_yaml_path: str, + tags_yaml_path: str, + fns_with_diff_infos: List[NativeFunctionWithDifferentiabilityInfo], + template_path: str, + used_keys: Set[str], +) -> None: + """VariableType.h and VariableType.cpp body + + This is the at::Type subclass for differentiable tensors. The + implementation of each function dispatches to the base tensor type to + compute the output. The grad_fn is attached to differentiable functions. 
+ """ + fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False) + fm.write( + "VariableType.h", + lambda: { + "generated_comment": "@" + + f"generated from {fm.template_dir_for_comments()}/VariableType.h" + }, + ) + + # helper that generates a TORCH_LIBRARY_IMPL macro for each + # dispatch key that appears in derivatives.yaml + def wrapper_registrations(used_keys: Set[str]) -> str: + library_impl_macro_list: List[str] = [] + for key in sorted(used_keys): + dispatch_key = key + if key == "Default": + dispatch_key = "Autograd" + library_impl_macro = ( + f"TORCH_LIBRARY_IMPL(aten, {dispatch_key}, m) " + + "{\n" + + "${" + + f"wrapper_registrations_{key}" + + "}\n}" + ) + library_impl_macro_list += [library_impl_macro] + return "\n\n".join(library_impl_macro_list) + + # Generate a new template from VariableType.cpp which replaces ${wrapper_registrations} + # with per key TORCH_LIBRARY_IMPL macros for each key that appears in derivatives.yaml + fm1 = FileManager( + install_dir=out + "/templates", template_dir=template_path, dry_run=False + ) + fm1.write( + "VariableType.cpp", + lambda: { + "type_derived_method_definitions": "\n\n".join( + [ + "${" + f"type_derived_method_definitions_{key}" + "}" + for key in sorted(used_keys) + ] + ), + "wrapper_registrations": wrapper_registrations(used_keys), + }, + ) + + # Generate final VariableType_*.cpp files from the generated template + fm2 = FileManager(install_dir=out, template_dir=out + "/templates", dry_run=False) + + sharded_keys = set( + [f"type_derived_method_definitions_{key}" for key in sorted(used_keys)] + + [f"wrapper_registrations_{key}" for key in sorted(used_keys)] + ) + # NOTE: see Note [Sharded File] at the top of the VariableType.cpp + # template regarding sharding of the generated files. + fm2.write_sharded( + "VariableType.cpp", + [fn for fn in fns_with_diff_infos if use_derived(fn)], + key_fn=lambda fn: cpp.name(fn.func.func), + base_env={ + "generated_comment": "@" + + f"generated from {fm.template_dir_for_comments()}/VariableType.cpp", + }, + env_callable=gen_variable_type_func, + num_shards=5, + sharded_keys=sharded_keys, + ) + + +@with_native_function_and +def gen_wrapper_registration(f: NativeFunction, key: str = "Default") -> str: + return WRAPPER_REGISTRATION.substitute( + unqual_operator_name_with_overload=f.func.name, + type_wrapper_name=type_wrapper_name(f, key), + class_type="VariableType", + ) + + +def gen_variable_type_func( + fn: NativeFunctionWithDifferentiabilityInfo, +) -> Dict[str, List[str]]: + f = fn.func + result = {} + with native_function_manager(f): + name = cpp.name(f.func) + formals = gen_formals(f) + + if ( + fn.info is None + and str(f.func.name.name) not in RESET_GRAD_ACCUMULATOR + and get_base_name(f) not in DONT_REQUIRE_DERIVATIVE + and len(gen_differentiable_outputs(fn)) > 0 + and cpp.name(f.func) not in DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE + and type_wrapper_name(f) not in DONT_ENFORCE_STORAGE_IMPL_USE_COUNT + and type_wrapper_name(f) not in DONT_ENFORCE_TENSOR_IMPL_USE_COUNT + ): + # NOTE: [ Registering AutogradNotImplemented boxed kernel ] + # + # When there is no derivatives.yaml entry, we register a generic boxed + # NotImplemented kernel to set grad_fn to be NotImplemented, so that forward + # proceeds as usual but an error is properly produced on backward. 
+ # TODO: it would be nice to not have these special cases + # + # There are several cases where still let codegen handle it: + # 1) ops that need to reset grad accumulator (we let codegen handle this case + # because) the list is (currently) only accessible in Python. + # 2) User explicitly specifies DONT_REQUIRE_DERIVATIVE. This basically makes + # autograd a fallthrough with NDEBUG checks. This can be useful for when all + # outputs are integral. + # 3) When there are no differentiable outputs. This is similar to (2). + # 4) There are certain ops where we skip certain NDEBUG checks. this is similar + # to (1). + type_definition = "" + wrapper_registration = AUTOGRAD_NOT_IMPLEMENTED_REGISTRATION.substitute( + unqual_operator_name_with_overload=f.func.name + ) + result["type_derived_method_definitions_Default"] = [type_definition] + result["wrapper_registrations_Default"] = [wrapper_registration] + else: + if not fn.info: + key = "Default" + type_definition = METHOD_DEFINITION.substitute( + return_type=cpp.returns_type( + f.func.returns, symint=True + ).cpp_type(), + type_wrapper_name=type_wrapper_name(f, key), + type_definition_body=emit_body(fn, key), + formals=formals, + ) + wrapper_registration = gen_wrapper_registration(f, key) + result[f"type_derived_method_definitions_{key}"] = [type_definition] + result[f"wrapper_registrations_{key}"] = [wrapper_registration] + else: + for key in fn.info.keys(): + type_definition = METHOD_DEFINITION.substitute( + return_type=cpp.returns_type( + f.func.returns, symint=True + ).cpp_type(), + type_wrapper_name=type_wrapper_name(f, key), + type_definition_body=emit_body(fn, key), + formals=formals, + ) + wrapper_registration = gen_wrapper_registration(f, key) + result[f"type_derived_method_definitions_{key}"] = [type_definition] + result[f"wrapper_registrations_{key}"] = [wrapper_registration] + # See Note [Manual Backend kernels] + assert (name in MANUAL_BACKEND) == f.manual_kernel_registration + # If you want to register a kernel to Autograd, you must make the op abstract. + # In other words, this op must have dispatch section in native_functions.yaml. + if name in MANUAL_AUTOGRAD_AND_TRACER or ( + fn.info and any(info.has_derivatives for info in fn.info.values()) + ): + msg = ( + f"There's a formula for {name}(or its functional variant) in derivatives.yaml. " + f"It's required to add a dispatch section for it with explicit supported backends e.g CPU/CUDA " + f"or CompositeExplicitAutograd in native_functions.yaml. Please see " + f"https://github.com/pytorch/pytorch/tree/master/aten/src/ATen/native#choosing-the-right-dispatch-keyword " + f"for instructions to choose the right dispatch keyword." + ) + assert f.is_abstract, msg + + return result + + +_foreach_ops_without_differentiability_info = { + # No reference backward available due to the lack of `{maximum, minimum}(tensor, scalar)`. + ("_foreach_maximum", "Scalar"), + ("_foreach_maximum", "ScalarList"), + ("_foreach_minimum", "Scalar"), + ("_foreach_minimum", "ScalarList"), + # No reference backward available as addcdiv/addcmul don't support Tensor as scaling factor. + ("_foreach_addcdiv", "Tensor"), + ("_foreach_addcmul", "Tensor"), + ("_foreach_copy", ""), +} + +_foreach_ops_with_different_arity = { + # These ops lack `alpha` of scaling factor to applied to the right hand side argument. 
+ ("_foreach_add", "Scalar"), + ("_foreach_add", "ScalarList"), + ("_foreach_sub", "Scalar"), + ("_foreach_sub", "ScalarList"), +} + + +@with_native_function_with_differentiability_info_and_key +def emit_body( + fn: NativeFunctionWithDifferentiabilityInfo, key: str = "Default" +) -> List[str]: + assert dispatch_strategy(fn) == "use_derived" + f = fn.func + info = fn.info[key] if fn.info else None + fw_derivatives = fn.fw_derivatives.get(key, []) if fn.fw_derivatives else [] + + name = cpp.name(f.func) + inplace = f.func.kind() == SchemaKind.inplace + is_out_fn = f.func.kind() == SchemaKind.out + returns_void = len(f.func.returns) == 0 + base_name = get_base_name(f) + view_info = get_view_info(f) + + is_foreach = name.startswith("_foreach") + is_inplace_foreach = is_foreach and inplace + if is_inplace_foreach: + inplace_foreacharg2refarg: Dict[Argument, Argument] = {} + refargname2inplace_foreacharg: Dict[str, Argument] = {} + base_name_and_overload_name = (f.func.name.name.base, f.func.name.overload_name) + if info is None: + assert ( + base_name_and_overload_name + in _foreach_ops_without_differentiability_info + ), f"{'.'.join(base_name_and_overload_name)} should have a differentiability info" + else: + assert ( + len(f.func.arguments.flat_non_out) + == len(info.func.func.arguments.flat_non_out) + ) or (base_name_and_overload_name in _foreach_ops_with_different_arity), ( + f"{'.'.join(base_name_and_overload_name)} has {len(f.func.arguments.flat_non_out)} args " + f"but the reference has {len(info.func.func.arguments.flat_non_out)}" + ) + for foreach_arg, ref_arg in zip( + f.func.arguments.flat_non_out, info.func.func.arguments.flat_non_out + ): + foreach_arg_type = foreach_arg.type + if isinstance(foreach_arg_type, ListType): + foreach_arg_type = foreach_arg_type.elem + assert foreach_arg_type == ref_arg.type + inplace_foreacharg2refarg[foreach_arg] = ref_arg + refargname2inplace_foreacharg[ref_arg.name] = foreach_arg + + def gen_differentiable_input( + arg: Union[Argument, SelfArgument, TensorOptionsArguments] + ) -> Optional[DifferentiableInput]: + if isinstance(arg, TensorOptionsArguments): + return None + a: Argument = arg.argument if isinstance(arg, SelfArgument) else arg + + # TODO: `cpp_type` is only to keep it byte-for-byte compatible with the old codegen, should remove. + # NB: This is not a clone of cpp.argument() - TensorOptionsArguments / faithful / binds are + # not handled properly as they are irrelevant for this codegen. + cpp_type = cpp.argument_type(a, binds=a.name, symint=True).cpp_type() + + if not is_differentiable(a.name, a.type, info): + return None + return DifferentiableInput( + name=a.name, + type=a.type, + cpp_type=cpp_type, + ) + + @with_native_function + def gen_differentiable_inputs(f: NativeFunction) -> List[DifferentiableInput]: + arguments = list(f.func.arguments.non_out) + if is_inplace_foreach and info is not None: + for i, arg in enumerate(f.func.arguments.flat_non_out): + if arg in inplace_foreacharg2refarg: + # note(crcrpar): From what I understand, what matters is only the name. + # Thus originally I only replace argument only when the names are different. + # TODO(crcrpar): Make it simpler. 
+ mapped_arg = inplace_foreacharg2refarg[arg] + arguments[i] = Argument( + mapped_arg.name, + mapped_arg.type, + mapped_arg.default, + mapped_arg.annotation, + ) + return list(mapMaybe(gen_differentiable_input, arguments)) + + def find_args_with_derivatives( + differentiable_inputs: List[DifferentiableInput], + ) -> List[DifferentiableInput]: + """Find arguments that have derivative definitions""" + if info is None or not info.has_derivatives: + return differentiable_inputs + names = {name for d in info.derivatives for name in d.var_names} + differentiable = [arg for arg in differentiable_inputs if arg.name in names] + if len(differentiable) != len(names): + missing = names - {arg.name for arg in differentiable} + raise RuntimeError( + f"Missing arguments for derivatives: {missing} in {info.name}" + ) + return differentiable + + differentiable_inputs = gen_differentiable_inputs(f) + args_with_derivatives = find_args_with_derivatives(differentiable_inputs) + differentiable_outputs = gen_differentiable_outputs(fn, key) + + undifferentiable = (base_name in DONT_REQUIRE_DERIVATIVE) or ( + name in DONT_REQUIRE_DERIVATIVE + ) + + requires_derivative = ( + (not undifferentiable) + and (len(differentiable_inputs) > 0) + and ( + (len(differentiable_outputs) > 0) + # note(crcrpar): In-place foreach functions are a void function. + or is_inplace_foreach + ) + ) + + if ( + info is not None + and info.has_derivatives + and not requires_derivative + # out= ops are allowed to have zero returns which cause requires_derivative to be False + # we shouldn't error out though (out= ops for autograd just redispatch) + and len(f.func.returns) > 0 + ): + raise RuntimeError( + f"ERROR: derivative ignored for {name} -- specified an autograd function without derivative" + ) + + # note(crcrpar): In-place foreach functions do not support forward AD + if requires_derivative and len(fw_derivatives) > 0 and not is_inplace_foreach: + assert sum(len(derivative.var_names) for derivative in fw_derivatives) == len( + differentiable_outputs + ), ( + "Expected the number of forward derivatives implemented to match the " + "number of differentiable outputs. NB: This only applies when at least " + "one forward derivative is implemented. Not implementing any forward " + "derivatives is also okay, and we would require inputs to the op to " + "not have associated tangents in that case." + ) + + try_jit_decomposition = ( + requires_derivative + and len(fw_derivatives) == 0 + and (not modifies_arguments(f)) + and (not returns_void) + ) + + def emit_save_inputs() -> List[str]: + setup: List[str] = [] + if info is None or not info.has_derivatives: + return setup + + has_tensorlist_arg = any( + is_tensor_list_type(arg.type) for arg in args_with_derivatives + ) + + # We don't want to save tensors if we know that they will never be used + # when computing the derivative, so we add guards to those statements + def guard_for(arg: SavedAttribute) -> Optional[str]: + assert info is not None + + # It's hard to determine the edge offset if we have TensorLists + # NOTE(crcrpar): in-place foreach functions' arguments include tensorlist + # but their derivatives don't use it, so let them bypass this check. + if has_tensorlist_arg and (not is_inplace_foreach): + return None + + # Empirical evaluation of the cases where we insert those guards in + # backward show that they are somewhat useless. E.g. there's no need + # to guard on some values captured from forward, because they had to + # require_grad if the backward function even gets executed. 
I don't + # have any good ideas for detecting those cases, so I simply disabled the + # checks. + if "backward" in info.name: + return None + + # If there's a single derivative we could compute, we already have + # a requires_grad check that is sufficient + if len(args_with_derivatives) <= 1: + return None + + # We really only care about trimming down the amount of tensors we save + if arg.nctype.type != BaseCType(tensorT): + return None + + # We want to emit simple guards, so we only allow that if checking one + # input is enough to determine whether we need that value + used_in = [d for d in info.derivatives if arg in d.saved_inputs] + assert len(used_in) > 0 + if len(used_in) != 1: + return None + derivative = used_in[0] + + # Case with multioutput formulas + # TODO: process all derivative formulas!!! + if len(derivative.var_names) != 1: + wrap_opt_if_start = derivative.formula.find( + f"wrap_opt_if({arg.nctype.name}" + ) + if wrap_opt_if_start == -1: + return None + + wrap_opt_if_match = re.match( + rf"wrap_opt_if\({arg.nctype.name},(.*?)\)", + derivative.formula[wrap_opt_if_start:], + ) + assert wrap_opt_if_match is not None + + # Condition is between 'wrap_opt_if(var_name,' and ')'. + condition_slice = slice(len(rf"wrap_opt_if\({arg.nctype.name},"), -1) + wrap_opt_if_condition = wrap_opt_if_match.group(0)[ + condition_slice + ].strip() + # replace 'grad_input_mask[num]' with 'grad_fn->should_compute_output(num)' + wrap_opt_if_condition = re.sub( + r"grad_input_mask\[(\d+)\]", + r"grad_fn->should_compute_output(\1)", + wrap_opt_if_condition, + ) + return f"{wrap_opt_if_condition}" + + # Figure out the offset of the edge that uses this variable + derivative_var_name = derivative.var_names[0] + for edge_off, a in enumerate(args_with_derivatives): + if a.name == derivative_var_name: + break + else: + raise AssertionError() + return f"grad_fn->should_compute_output({edge_off})" + + if is_inplace_foreach: + save_input_stmts = save_variables(info.all_saved_inputs, False, guard_for) + if save_input_stmts: + setup.append( + LOOP_OVER_VECTOR_OF_GRAD_FNS.substitute( + preamble="", statements=save_input_stmts + ) + ) + else: + setup.extend(save_variables(info.all_saved_inputs, False, guard_for)) + for arg in args_with_derivatives: + if is_tensor_list_type(arg.type): + setup.append(f"grad_fn->{arg.name}_size_ = {arg.name}.size();") + return setup + + def setup_derivative(differentiable_inputs: List[DifferentiableInput]) -> List[str]: + body: List[str] = [] + if is_out_fn: + # For out functions, ensure that no input or output requires grad + body.append(DECLARE_GRAD_FN.substitute(op="Node")) + body.append( + SETUP_NONE_REQUIRES_GRAD.substitute( + base_name=base_name, + args_to_check=[arg.name for arg in differentiable_inputs], + ) + ) + body.append( + SETUP_NONE_REQUIRES_GRAD.substitute( + base_name=base_name, + args_to_check=[arg.name for arg in differentiable_outputs], + ) + ) + return body + + op = info.op if info is not None and info.has_derivatives else "NotImplemented" + setup = [] + if not is_inplace_foreach: + setup.extend( + ASSIGN_GRAD_FN.substitute( + op=op, + op_ctor="" + if info is not None and info.has_derivatives + else f'"{cpp.name(f.func)}"', + args_with_derivatives=[arg.name for arg in args_with_derivatives], + ).split("\n") + ) + else: + # note(crcrpar): Assuming in-place foreach function's self_arg is always TensorList. 
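# ---------------------------------------------------------------------------
# Editor's aside (illustrative sketch only).  For multi-output formulas the
# guard above is taken from a `wrap_opt_if(<saved var>, <condition>)` marker in
# the derivative formula, and `grad_input_mask[k]` inside that condition is
# rewritten to `grad_fn->should_compute_output(k)`.  A minimal re-run of those
# two regexes on a made-up formula (a simpler equivalent of the slice-based
# extraction above):
import re

formula = "wrap_opt_if(weight, grad_input_mask[1] || grad_input_mask[2]) * grad"
name = "weight"

start = formula.find(f"wrap_opt_if({name}")
m = re.match(rf"wrap_opt_if\({name},(.*?)\)", formula[start:])
assert m is not None
condition = m.group(1).strip()
guard = re.sub(r"grad_input_mask\[(\d+)\]", r"grad_fn->should_compute_output(\1)", condition)

print(guard)
# grad_fn->should_compute_output(1) || grad_fn->should_compute_output(2)
# ---------------------------------------------------------------------------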
+ list_like_arg = "self" + args = [arg.name for arg in args_with_derivatives] + for i, arg in enumerate(args): + if is_inplace_foreach and info is not None: + if arg in refargname2inplace_foreacharg: + foreach_arg = refargname2inplace_foreacharg[arg] + args[i] = foreach_arg.name + ( + "[i]" if isinstance(foreach_arg.type, ListType) else "" + ) + else: + if arg == list_like_arg: + args[i] = arg + "[i]" + setup.extend( + ASSIGN_VECTOR_OF_GRAD_FN.substitute( + op=op, + op_ctor="" + if info is not None and info.has_derivatives + else f'"{cpp.name(f.func)}"', + args_with_derivatives=args, + irange=f"{list_like_arg}.size()", + ).split("\n") + ) + setup.extend(emit_save_inputs()) + + body.extend( + emit_check_no_requires_grad(differentiable_inputs, args_with_derivatives) + ) + declare_grad_fn_template = ( + DECLARE_GRAD_FN if not is_inplace_foreach else DECLARE_VECTOR_OF_GRAD_FN + ) + body.append(declare_grad_fn_template.substitute(op=op)) + body.append(SETUP_DERIVATIVE.substitute(setup=setup)) + return body + + def emit_check_if_in_complex_autograd_allowlist() -> List[str]: + body: List[str] = [] + if base_name in GRADIENT_IMPLEMENTED_FOR_COMPLEX: + return body + for arg in differentiable_outputs: + name = arg.name + # TODO: should be `arg.type.is_tensor_like()`? + if arg.cpp_type == "at::Tensor" or arg.cpp_type in TENSOR_LIST_LIKE_CTYPES: + body.append(f'throw_error_for_complex_autograd({name}, "{base_name}");') + return body + + def emit_check_no_requires_grad( + tensor_args: List[DifferentiableInput], + args_with_derivatives: List[DifferentiableInput], + ) -> List[str]: + """Checks that arguments without derivatives don't require grad""" + body: List[str] = [] + for arg in tensor_args: + if arg in args_with_derivatives: + continue + arg_name = arg.name + if info and arg_name in info.non_differentiable_arg_names: + continue + if arg_name == "output": + # Double-backwards definitions sometimes take in 'input' and + # 'output', but only define the derivative for input. 
+ continue + body.append(f'check_no_requires_grad({arg_name}, "{arg_name}", "{name}");') + return body + + def emit_original_self_definition() -> List[str]: + body: List[str] = [] + if inplace: + if is_inplace_foreach: + body.append( + "std::vector> original_selfs(self.size());" + ) + else: + body.append("c10::optional original_self;") + + all_forward_grad_cond = [] + for derivative in fw_derivatives: + if derivative.required_original_self_value: + all_forward_grad_cond.append( + get_any_has_forward_grad_name(derivative.var_names) + ) + + if all_forward_grad_cond: + if not is_inplace_foreach: + body.append(f'if ({" || ".join(all_forward_grad_cond)}) {{') + body.append(" original_self = self.clone();") + body.append("}") + else: + current_all_forward_grad_cond = [ + f"{cond}[i]" for cond in all_forward_grad_cond + ] + body.append("for (const auto& i : c10::irange(self.size())) {") + body.append( + f" if ({' || '.join(current_all_forward_grad_cond)}) {{" + ) + body.append(" original_selfs[i] = self[i].clone();") + body.append(" }") + body.append("}") + + return body + + def save_variables( + saved_variables: Sequence[SavedAttribute], + is_output: bool, + guard_for: Callable[[SavedAttribute], Optional[str]] = lambda name: None, + ) -> Sequence[str]: + # assign the saved variables to the generated grad_fn + stmts: List[str] = [] + for arg in sorted(saved_variables, key=lambda sa: str(sa.nctype.name)): + name = ( + arg.nctype.name.name + if isinstance(arg.nctype.name, SpecialArgName) + else arg.nctype.name + ) + foreacharg: Optional[Argument] = None + is_foreacharg_list_type: bool = False + type = arg.nctype.type + expr = arg.expr + stmts_prepend = None + if is_inplace_foreach and info is not None: + # todo(crcrpar): See if we can add some check e.g. `assert foreacharg is not None`. + # for now the example assert would fail. + name_to_query = name.split("_scalar_type")[0] + if name_to_query in refargname2inplace_foreacharg: + foreacharg = refargname2inplace_foreacharg[name_to_query] + is_foreacharg_list_type = isinstance(foreacharg.type, ListType) + if foreacharg is not None: + name_in_expr = ( + f"{foreacharg.name}{'[i]' if is_foreacharg_list_type else ''}" + ) + src_name = name + if "_scalar_type" in src_name: + split_src_name = src_name.split("_scalar_type") + assert len(split_src_name) == 2 + src_name = split_src_name[0] + expr = expr.replace(src_name, name_in_expr) + if ( + type == BaseCType(tensorT) + or type == OptionalCType(BaseCType(tensorT)) + or type == MutRefCType(OptionalCType(BaseCType(tensorT))) + or (is_output and type == BaseCType(scalarT)) + ): + # note(crcrpar): Here `expr` is generated from scratch, `arg.expr` is ignored. 
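# ---------------------------------------------------------------------------
# Editor's aside (illustrative sketch only).  For a non-foreach in-place op the
# helper above emits a lazily-populated copy of `self` that is only cloned when
# some forward gradient will actually be set.  Roughly, the generated C++ looks
# like the lines assembled below (the optional's element type is abbreviated
# here and is an assumption of this sketch):
def sketch_original_self(forward_grad_conds):
    body = ["c10::optional<Tensor> original_self;"]
    if forward_grad_conds:
        body.append(f'if ({" || ".join(forward_grad_conds)}) {{')
        body.append("  original_self = self.clone();")
        body.append("}")
    return body

print("\n".join(sketch_original_self(["_any_has_forward_grad_result"])))
# ---------------------------------------------------------------------------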
+ var = name + name += "_" + if var == "self" and inplace: + original_self_var = ( + "original_self" + if not is_inplace_foreach + else "original_selfs[i]" + ) + self_var = var if not is_inplace_foreach else var + "[i]" + stmts_prepend = f"if (!{original_self_var}.has_value()) {original_self_var} = {self_var}.clone()" + var = f"{original_self_var}.value()" + assert not is_output + if inplace and is_output: + assert name == "result_" + var = ( + "self[i]" + if is_inplace_foreach or is_foreacharg_list_type + else "self" + ) + is_inplace_view = f"{var}.is_view()" + expr = f"SavedVariable({var}, {str(is_output).lower()}, {is_inplace_view})" + else: + expr = f"SavedVariable({var}, {str(is_output).lower()})" + if foreacharg is not None and "original_selfs" not in expr: + expr = expr.replace(src_name, name_in_expr) + elif ( + type == BaseCType(tensorListT) + or type == ListCType(OptionalCType(BaseCType(tensorT))) + or type == BaseCType(iTensorListRefT) + or type == VectorCType(BaseCType(tensorT)) + ): + # See Note [nuanced return type of out-of-place foreach functions] + if type == VectorCType(BaseCType(tensorT)): + assert is_foreach and is_output + expr = f"make_saved_variable_list({name}, {str(is_foreach and is_output).lower()})" + name += "_" + elif type == BaseCType(intArrayRefT): + expr = expr + ".vec()" + elif type == BaseCType(symIntArrayRefT): + expr = expr + ".vec()" + elif type == BaseCType(stringT): + expr = f"std::string({expr})" + elif type == OptionalCType(BaseCType(stringT)): + expr = f"{expr}.has_value() ? c10::optional(std::string({expr}.value())) : c10::nullopt" + elif type == ArrayRefCType( + elem=BaseCType(type=BaseCppType(ns="at", name="Scalar")) + ): + expr = expr + ".vec()" + + guard = guard_for(arg) + if guard is None: + if stmts_prepend: + stmts.append(f"{stmts_prepend};") + stmts.append(f"grad_fn->{name} = {expr};") + else: + stmts.append(f"if ({guard}) {{") + if stmts_prepend: + stmts.append(f" {stmts_prepend};") + stmts.append(f" grad_fn->{name} = {expr};") + stmts.append("}") + return stmts + + # Generates a Dispatcher::redispatch() call into the dispatcher. We do this mainly for performance reasons: + # - Pre-compute the full DispatchKeySet. This saves the dispatcher from having to read from TLS. + # - redispatch() avoids a redundant call to RecordFunction, which was already called right before + # we entered this autograd kernel. + def emit_dispatch_call( + f: NativeFunction, input_base: str, unpacked_args: Sequence[str] + ) -> str: + """Dispatch call via function in a namespace or method on Tensor.""" + dispatcher_sig = DispatcherSignature.from_schema(f.func) + dispatcher_exprs = dispatcher_sig.exprs() + + # code-generated autograd kernels plumb and recompute dispatch keys directly through the kernel for performance. + # Ops also always have a function variant of the redispatch API. + # See Note [Plumbing Keys Through The Dispatcher] for details. 
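# ---------------------------------------------------------------------------
# Editor's aside (illustrative sketch only).  The end of save_variables wraps a
# saved-variable assignment in the guard computed by guard_for, so that e.g. a
# tensor is only stashed on the autograd node when its output edge is needed:
def emit_save(name, expr, guard=None, prepend=None):
    stmts = []
    if guard is None:
        if prepend:
            stmts.append(f"{prepend};")
        stmts.append(f"grad_fn->{name} = {expr};")
    else:
        stmts.append(f"if ({guard}) {{")
        if prepend:
            stmts.append(f"  {prepend};")
        stmts.append(f"  grad_fn->{name} = {expr};")
        stmts.append("}")
    return stmts

print("\n".join(emit_save("weight_", "SavedVariable(weight, false)",
                          guard="grad_fn->should_compute_output(1)")))
# ---------------------------------------------------------------------------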
+ dispatch_key_set = "ks & c10::after_autograd_keyset" + call = CALL_REDISPATCH.substitute( + api_name=cpp.name( + f.func, + faithful_name_for_out_overloads=True, + symint_overload=f.func.has_symint(), + ), + unpacked_args=[dispatch_key_set] + list(unpacked_args), + ) + return call + + def wrap_output( + f: NativeFunction, unpacked_bindings: List[Binding], var: str + ) -> str: + call = "" + rhs_value: Optional[str] = None + if not any(r.type.is_tensor_like() for r in f.func.returns): + rhs_value = var + else: + rhs_value = f"std::move({var})" + assert rhs_value is not None + call += ASSIGN_RETURN_VALUE.substitute( + return_values=tie_return_values(f), rhs_value=rhs_value + ) + return call + + def check_tensorimpl_and_storage( + call: str, unpacked_bindings: List[Binding] + ) -> str: + # See NOTE [ TensorImpl and Storage Pointer Sanity Checks ] + stmts_before_call: List[str] = [] + stmts_after_call: List[str] = [] + + if cpp.name(f.func) in DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE: + return call + + # Check properties of inputs (enforce (1)) + for unpacked_binding in unpacked_bindings: + arg = unpacked_binding.name + noref_cpp_type = unpacked_binding.nctype.type.remove_const_ref() + if noref_cpp_type == BaseCType(tensorListT) or noref_cpp_type == BaseCType( + iTensorListRefT + ): + stmts_before_call += [ + SAVE_TENSORLIST_STORAGE.substitute(tensorlist_name=arg), + SAVE_TENSORLIST_IMPL.substitute(tensorlist_name=arg), + ] + stmts_after_call += [ + ENFORCE_SAME_TENSORLIST_STORAGE.substitute(tensorlist_name=arg), + ENFORCE_SAME_TENSORLIST_IMPL.substitute(tensorlist_name=arg), + ] + elif noref_cpp_type == ListCType(OptionalCType(BaseCType(tensorT))): + stmts_before_call += [ + SAVE_OPTIONALTENSORLIST_STORAGE.substitute(tensorlist_name=arg), + SAVE_OPTIONALTENSORLIST_IMPL.substitute(tensorlist_name=arg), + ] + stmts_after_call += [ + ENFORCE_SAME_OPTIONALTENSORLIST_STORAGE.substitute( + tensorlist_name=arg + ), + ENFORCE_SAME_OPTIONALTENSORLIST_IMPL.substitute( + tensorlist_name=arg + ), + ] + elif noref_cpp_type == BaseCType(tensorT): + stmts_before_call += [ + SAVE_TENSOR_STORAGE.substitute(tensor_name=arg), + SAVE_TENSOR_IMPL.substitute(tensor_name=arg), + ] + stmts_after_call += [ + ENFORCE_SAME_TENSOR_STORAGE.substitute( + tensor_name=arg, out_tensor_name=arg + ), + ENFORCE_SAME_TENSOR_IMPL.substitute(tensor_name=arg), + ] + + assert (stmts_before_call and stmts_after_call) or ( + not stmts_before_call and not stmts_after_call + ) + + # Check properties of outputs (enforce (2), (3)) + if f.func.kind() not in (SchemaKind.inplace, SchemaKind.out): + base_name = f.func.name.name.base # TODO: should be str(f.func.name.name)? 
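# ---------------------------------------------------------------------------
# Editor's aside (illustrative sketch only).  emit_dispatch_call prepends the
# precomputed DispatchKeySet to the unpacked arguments; assuming CALL_REDISPATCH
# expands to an `at::redispatch::<api_name>(...)` call (the template text is not
# shown in this diff hunk), the result for an out= overload would look roughly
# like:
def sketch_redispatch(api_name, unpacked_args):
    args = ["ks & c10::after_autograd_keyset"] + list(unpacked_args)
    return f"at::redispatch::{api_name}({', '.join(args)})"

print(sketch_redispatch("add_outf", ["self_", "other_", "alpha", "out_"]))
# at::redispatch::add_outf(ks & c10::after_autograd_keyset, self_, other_, alpha, out_)
# ---------------------------------------------------------------------------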
+ aliased_arg_name = ALL_VIEW_FUNCTIONS.get(base_name, None) + if aliased_arg_name is not None: + aliased_arg_name = unpacked_name(aliased_arg_name) + for i, (ret, ret_name) in enumerate( + zip(f.func.returns, cpp.return_names(f)) + ): + noref_cpp_type = cpp.return_type(ret, symint=True).remove_const_ref() + if noref_cpp_type == BaseCType(tensorT): + if aliased_arg_name is not None: + assert ( + i == 0 + ), "Expect non-CompositeImplicitAutograd view function {base} to return single output" + stmts_after_call += [ + ENFORCE_SAME_TENSOR_STORAGE.substitute( + tensor_name=aliased_arg_name, out_tensor_name=ret_name + ) + ] + else: + if ( + type_wrapper_name(f) + not in DONT_ENFORCE_STORAGE_IMPL_USE_COUNT + ): + stmts_after_call += [ + ENFORCE_TENSOR_STORAGE_USE_COUNT_EQUALS_ONE.substitute( + tensor_name=ret_name, fn_name=type_wrapper_name(f) + ) + ] + + if type_wrapper_name(f) not in DONT_ENFORCE_TENSOR_IMPL_USE_COUNT: + stmts_after_call += [ + ENFORCE_TENSOR_IMPL_USE_COUNT_LT_OR_EQ_ONE.substitute( + tensor_name=ret_name, fn_name=type_wrapper_name(f) + ) + ] + + # Currently we don't have any functions that return the following types, but + # we should update the checks once we do + elif noref_cpp_type == ListCType(OptionalCType(BaseCType(tensorT))): + raise AssertionError( + f"Please add use_count checks for {noref_cpp_type}" + ) + elif noref_cpp_type == BaseCType(tensorListT): + raise AssertionError( + f"Please add use_count checks for {noref_cpp_type}" + ) + + if stmts_before_call and stmts_after_call: + call = ( + RUN_ONLY_IN_DEBUG_MODE.substitute(statements=stmts_before_call) + + call + + RUN_ONLY_IN_DEBUG_MODE.substitute(statements=stmts_after_call) + ) + return call + + def emit_call( + f: NativeFunction, unpacked_bindings: List[Binding], try_jit_decomposition: bool + ) -> str: + # We only care about adding `at::AutoDispatchBelowAutograd` guard for non-variable dispatch + # (which corresponds to 'use_derived' strategy). The purpose of this guard is to make sure + # the baseType operations still dispatch to non-Variable type, even if the arguments passed + # in are now Variables. + # See NOTE [ Treating Variables as non-Variables in type dispatch ] for details. 
+ unpacked_args = [b.name for b in unpacked_bindings] + base_type_call = emit_dispatch_call(f, "self_", unpacked_args) + + if get_view_info(f) is not None or modifies_arguments(f): + guard = "at::AutoDispatchBelowAutograd guard;" + else: + guard = "at::AutoDispatchBelowADInplaceOrView guard;" + + any_has_forward_grad = ( + get_any_has_fw_grad_cond(derivative=None) + if requires_derivative + else "false" + ) + return_types = ", ".join( + [cpp.return_type(a, symint=True).cpp_type() for a in f.func.returns] + ) + if len(f.func.returns) > 1: + return_types = f"std::tuple<{return_types}>" + + arg_names = [ + a.name + for a in cpp.arguments( + f.func.arguments, + faithful=True, + symint=True, + method=False, + cpp_no_default_args=set(), + ) + ] + + if not modifies_arguments(f) and not returns_void: + if try_jit_decomposition: + call = DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES_JVP_DECOMP.substitute( + base_type_call=base_type_call, + tmp_var=TMP_VAR, + guard=guard, + any_has_forward_grad=any_has_forward_grad, + op_name=cpp.name(f.func), + op_overload=f.func.name.overload_name, + return_types=return_types, + arg_names=arg_names, + ) + else: + call = DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES.substitute( + base_type_call=base_type_call, + tmp_var=TMP_VAR, + guard=guard, + ) + + call += wrap_output(f, unpacked_bindings, TMP_VAR) + else: + assert not try_jit_decomposition + call = DISPATCH_TO_NON_VAR_TYPE_WITHOUT_RETURN_VALUES.substitute( + base_type_call=base_type_call, guard=guard + ) + call = check_tensorimpl_and_storage(call, unpacked_bindings) + return call + + def emit_history() -> str: + fn = "rebase" if modifies_arguments(f) and view_info is None else "set" + output_names = [r.name for r in differentiable_outputs] + # TODO: flatten allocates a std::vector, which could be expensive + outs = CodeTemplate("flatten_tensor_args( ${outs} )").substitute( + outs=output_names if not is_inplace_foreach else "self" + ) + if not is_inplace_foreach: + return SET_HISTORY.substitute(fn=fn, differentiable_outputs=outs) + else: + return LOOP_OVER_VECTOR_OF_GRAD_FNS.substitute( + preamble=( + f"auto differentiable_outputs = {outs};\n" + f"TORCH_INTERNAL_ASSERT(differentiable_outputs.size() == grad_fns.size());" + ), + statements=f"{fn}_history(differentiable_outputs[i], grad_fns[i]);", + ) + + def emit_save_outputs() -> str: + if is_out_fn: + # out functions don't currently support differentiation + return "" + if info is not None and info.has_derivatives: + stmts = save_variables(info.all_saved_outputs, True) + if len(stmts) == 0: + return "" + if not is_inplace_foreach: + return CONDITIONAL.substitute(cond="grad_fn", statements=stmts) + else: + return LOOP_OVER_VECTOR_OF_GRAD_FNS.substitute( + preamble="", statements=stmts + ) + return "" + + def emit_any_requires_grad() -> List[str]: + extra_condition = "" + if info and info.output_differentiability_conditions: + assert len(info.output_differentiability_conditions) == 1 + extra_condition = f"_any_requires_grad &= ({info.output_differentiability_conditions[0]});" + names_of_args_with_derivatives = [arg.name for arg in args_with_derivatives] + if is_inplace_foreach and info is not None: + for i, arg in enumerate(names_of_args_with_derivatives): + for f_arg, r_arg in inplace_foreacharg2refarg.items(): + if arg == r_arg.name: + names_of_args_with_derivatives[i] = f_arg.name + return [ + SETUP_ANY_REQUIRES_GRAD.substitute( + args_with_derivatives=names_of_args_with_derivatives, + extra_differentiability_conditions=extra_condition, + ) + ] + + def 
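# ---------------------------------------------------------------------------
# Editor's aside (illustrative sketch only).  When the op has more than one
# return, the C++ return type used by the JVP-decomposition path is the joined
# tuple of the individual return types, mirroring the two lines above:
def sketch_return_types(cpp_types):
    joined = ", ".join(cpp_types)
    return f"std::tuple<{joined}>" if len(cpp_types) > 1 else joined

print(sketch_return_types(["at::Tensor"]))                 # at::Tensor
print(sketch_return_types(["at::Tensor", "at::Tensor"]))   # std::tuple<at::Tensor, at::Tensor>
# ---------------------------------------------------------------------------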
get_any_has_forward_grad_name(var_names: Tuple[str, ...]) -> str: + if len(var_names) == 1: + return f"_any_has_forward_grad_{var_names[0]}" + else: + return f'_any_has_forward_grad_{"_".join(var_names)}' + + def emit_any_has_forward_grad() -> List[str]: + content: List[str] = [] + if not is_foreach: + for derivative in fw_derivatives: + requires_fw_grad = get_any_has_fw_grad_cond(derivative=derivative) + if info and info.output_differentiability_conditions: + assert len(info.output_differentiability_conditions) == 1 + requires_fw_grad = f"({info.output_differentiability_conditions[0]}) && {requires_fw_grad}" + content.append( + f"[[maybe_unused]] auto {get_any_has_forward_grad_name(derivative.var_names)} = {requires_fw_grad};" + ) + else: + for derivative in fw_derivatives: + bool_vector_name = get_any_has_forward_grad_name(derivative.var_names) + cur_derivative_conditions = [] + for inp in differentiable_inputs: + if derivative.required_inputs_fw_grad is None: + continue + if inp.name not in derivative.required_inputs_fw_grad: + continue + inp_name = ( + inp.name + if not inplace + else refargname2inplace_foreacharg[inp.name].name + ) + inp_type = ( + inp.type + if not inplace + else refargname2inplace_foreacharg[inp.name].type + ) + is_list_type = is_tensor_list_type(inp_type) + if is_list_type: + if inp_name != "self": + content.append( + FW_DERIVATIVE_SIZE_CHECK_TEMPLATE.substitute( + inp_name=inp_name + ) + ) + cur_derivative_conditions.append( + FW_DERIVATIVE_CHECK_TEMPLATE.substitute( + req_inp=inp_name + "[i]" + ) + ) + else: + cur_derivative_conditions.append( + FW_DERIVATIVE_CHECK_TEMPLATE.substitute(req_inp=inp_name) + ) + + content.append(f"std::vector {bool_vector_name}(self.size());") + content.append("for (const auto& i : c10::irange(self.size())) {") + content.append( + f" {bool_vector_name}[i] = {' || '.join(cur_derivative_conditions)};" + ) + content.append("}") + return content + + def emit_check_inplace() -> List[str]: + if not inplace: + return [] + return [ + f"check_inplace({arg.name}, _any_requires_grad);" + for arg in differentiable_outputs + ] + + def emit_fw_derivatives() -> List[str]: + content: List[str] = [] + fw_grad_setters: List[str] = [] + for derivative in fw_derivatives: + res = derivative.var_names + if f.func.name.name.inplace: + assert ( + len(res) == 1 + ), "Expected number of outputs to be 1 if function is inplace" + # TODO update this when inplace namings are unified + res = ("self",) + + assert derivative.required_inputs_fw_grad is not None + + unpacked_arguments = "" + for inp in differentiable_inputs: + inp_name = inp.name + is_input_tensorlist = is_foreach and is_tensor_list_type( + inp.type + if not inplace + else refargname2inplace_foreacharg[inp.name].type + ) + input_suffix = "[i]" if is_input_tensorlist else "" + if is_inplace_foreach: + if inp.name in refargname2inplace_foreacharg: + inp_name = refargname2inplace_foreacharg[inp.name].name + zeros_fn = ( + "zeros" + if inplace and inp.name == "self" + else "_efficientzerotensor" + ) + if inp.name in derivative.required_inputs_fw_grad: + unpacked_arguments += ( + FW_DERIVATIVE_DEFINED_GRAD_TEMPLATE.substitute( + inp_name=inp.name, + inp=inp_name + input_suffix, + zeros_fn=zeros_fn, + ) + ) + if inp.name in (derivative.required_inputs_primal or []): + unpacked_arguments += ( + FW_DERIVATIVE_DEFINED_PRIMAL_TEMPLATE.substitute( + inp_name=inp.name, + inp=inp_name + input_suffix, + ) + ) + if derivative.required_original_self_value: + input_suffix = "s[i]" if is_inplace_foreach else "" + 
unpacked_arguments += FW_DERIVATIVE_DEFINED_GRAD_TEMPLATE.substitute( + inp_name="original_self", + inp="original_self" + input_suffix, + zeros_fn=zeros_fn, + ) + unpacked_arguments += FW_DERIVATIVE_DEFINED_PRIMAL_TEMPLATE.substitute( + inp_name="original_self", + inp="original_self" + input_suffix, + ) + elif inplace and derivative.is_reusing_outplace_formula: + # The gradient wasn't already cloned, do it if grad mode is enabled + unpacked_arguments += ( + "self_t = GradMode::is_enabled() ? self_t.clone() : self_t;" + ) + + if inplace: + is_inplace_str = "true" + else: + is_inplace_str = "false" + + requires_fw_grad = get_any_has_forward_grad_name(derivative.var_names) + + if all( + (isinstance(var_type, BaseType) and var_type.is_tensor_like()) + for var_type in derivative.var_types + ): + # Is there a way to get from BaseType to BaseCType + if len(derivative.var_types) == 1: + opt_res_grad_type = OptionalCType(BaseCType(tensorT)).cpp_type() + if not is_foreach: + fw_grad_setters.append( + FW_DERIVATIVE_SETTER_TENSOR.substitute( + out_arg=res[0], is_inplace=is_inplace_str + ) + ) + else: + assert res[0] == ("result" if not inplace else "self") + fw_grad_setters.append( + FW_DERIVATIVE_SETTER_TENSOR_FOREACH.substitute( + out_arg=res[0], is_inplace=is_inplace_str + ) + ) + requires_fw_grad += f" && ({derivative.var_names[0]}.defined())" + else: + tuple_type = TupleCType( + [BaseCType(tensorT)] * len(derivative.var_types) + ) + opt_res_grad_type = OptionalCType(tuple_type).cpp_type() + for idx, single_res in enumerate(res): + fw_grad_setters.append( + FW_DERIVATIVE_SETTER_MULTI_OUTPUT.substitute( + idx=idx, all_res="_".join(res), out_arg=single_res + ) + ) + elif ( + isinstance(derivative.var_types[0], ListType) + and derivative.var_types[0].is_tensor_like() + ): + assert ( + len(derivative.var_types) == 1 + ), "Expected number of outputs to be 1 if function returns ListType" + if not is_foreach: + opt_res_grad_type = OptionalCType( + VectorCType(BaseCType(tensorT)) + ).cpp_type() + fw_grad_setters.append( + FW_DERIVATIVE_SETTER_TENSOR_LIST.substitute( + out_arg=res[0], is_inplace=is_inplace_str + ) + ) + else: + # TODO(crcrpar): Should this (= the foreach specific logic) be refactored somehow? + # Only out-place foreach functions that have entries in `tools/autograd/derivatives.yaml` + # can reach here. + opt_res_grad_type = OptionalCType(BaseCType(tensorT)).cpp_type() + fw_grad_setters.append( + FW_DERIVATIVE_SETTER_TENSOR_FOREACH.substitute( + out_arg=res[0], is_inplace=is_inplace_str + ) + ) + else: + raise RuntimeError("Unsupported output type for forward derivative") + + if not is_foreach: + fw_grad_opt_definition = f"{opt_res_grad_type} {'_'.join(res)}_new_fw_grad_opt = c10::nullopt;" + # View ops create fw_grad that already is a view of the base's fw_grad so just use that + content.append( + FW_DERIVATIVE_TEMPLATE.substitute( + fw_grad_opt_definition=fw_grad_opt_definition, + requires_fw_grad=requires_fw_grad, + formula=derivative.formula, + out_arg="_".join(res), + unpacked_arguments=unpacked_arguments, + ) + ) + else: + # note(crcrpar): Assuming `self` is TensorList. + fw_grad_opt_definition = ( + f"std::vector<{opt_res_grad_type}> {'_'.join(res)}_new_fw_grad_opts" + "(self.size(), c10::nullopt);" + ) + foreach_forward_grad_formula = derivative.formula + _foreach_arg: Union[Argument, DifferentiableInput] + if inplace: + for _foreach_arg, _ref_arg in inplace_foreacharg2refarg.items(): + # note(crcrpar): Massage only Scalar and ArrayRef here. 
+ if not ( + is_tensor_type(_foreach_arg.type) + or is_tensor_list_type(_foreach_arg.type) + ): + pattern = _foreach_arg.name + if isinstance(_foreach_arg.type, ListType): + pattern += "[i]" + foreach_forward_grad_formula = ( + foreach_forward_grad_formula.replace( + _ref_arg.name, pattern + ) + ) + else: + if ( + "result" in foreach_forward_grad_formula + and "result[i]" not in foreach_forward_grad_formula + ): + foreach_forward_grad_formula = ( + foreach_forward_grad_formula.replace("result", "result[i]") + ) + + content.append( + FW_DERIVATIVE_FOREACH_TEMPLATE.substitute( + fw_grad_opt_definition=fw_grad_opt_definition, + vector_of_optional_tensor=f"{'_'.join(res)}_new_fw_grad_opts", + any_has_forward_grad_for_current_index=" || ".join( + get_any_has_forward_grad_name(derivative.var_names) + "[i]" + for derivative in fw_derivatives + ), + formula=foreach_forward_grad_formula, + unpacked_arguments=unpacked_arguments, + ) + ) + + # Set all the grads at the end to avoid: https://github.com/pytorch/pytorch/issues/67367 + content.append("\n".join(fw_grad_setters)) + return content + + def get_any_has_fw_grad_cond(derivative: Optional[ForwardDerivative]) -> str: + # + # Produces a condition string (e.g, "isFwGradDefined(grad_output) || isFwGradDefined(output)") + # + if derivative is None: + # (1) If a derivative is NOT provided, cond will check fw_grad of ALL differentiable inputs + # - Used in the out_fn case when we want to forbid fw derivatives + # - Used in the case where the fw_derivative is not defined, but we want + # To check if there is a decomposition registered for jvp + to_check: List[str] = [] + for inp in list( + mapMaybe( + gen_differentiable_input, + f.func.arguments.non_out + list(f.func.arguments.out), # type: ignore[operator] + ) + ): + if is_tensor_type(inp.type): + to_check.append( + FW_DERIVATIVE_CHECK_TEMPLATE.substitute(req_inp=inp.name) + ) + elif is_tensor_list_type(inp.type): + to_check.append( + FW_DERIVATIVE_TENSORLIST_CHECK_TEMPLATE.substitute( + req_inp=inp.name + ) + ) + else: + raise RuntimeError( + f'Unsupported input type for "{name}" when forbidding forward AD usage.' + ) + return f'({" || ".join(to_check)})' + else: + # (2) If derivative is provided, use that information to determine which inputs + # to check fw_grad for + assert derivative.required_inputs_fw_grad is not None + + if len(derivative.required_inputs_fw_grad) == 0: + # Handle functions like stack + # For these, we don't unpack anything and always call the user function + if not ( + len(differentiable_inputs) == 1 + and is_tensor_list_type(differentiable_inputs[0].type) + ): + raise RuntimeError( + f'No differentiable input to "{name}" is a differentiable Tensor (as the provided ' + "forward AD formula does not use any input tangent) even though a forward gradient " + "formula has been defined for it. This case should only happen for function that " + "take a single TensorList as input. All other cases are not supported right now." 
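# ---------------------------------------------------------------------------
# Editor's aside (illustrative sketch only).  For out-of-place foreach forward
# derivatives the per-element formula is obtained by indexing `result`, guarded
# so an already-indexed formula is left alone, mirroring the branch above:
def index_result(formula):
    if "result" in formula and "result[i]" not in formula:
        formula = formula.replace("result", "result[i]")
    return formula

print(index_result("result * scalars[i]"))     # result[i] * scalars[i]
print(index_result("result[i] * self_t[i]"))   # unchanged
# ---------------------------------------------------------------------------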
+ ) + any_has_fw_grad = "true" + else: + any_has_fw_grad = " || ".join( + [ + ( + FW_DERIVATIVE_TENSORLIST_CHECK_TEMPLATE + if is_tensor_list_type(inp.type) + else FW_DERIVATIVE_CHECK_TEMPLATE + ).substitute(req_inp=inp.name) + for inp in differentiable_inputs + if inp.name in derivative.required_inputs_fw_grad + ] + ) + any_has_fw_grad = f"({any_has_fw_grad})" + + return any_has_fw_grad + + def emit_forbid_fw_derivatives(is_out_fn: bool = False) -> str: + if is_out_fn: + msg = "because it is an out= function" + else: + msg = ( + "because it has not been implemented yet.\\nPlease file an issue " + "to PyTorch at https://github.com/pytorch/pytorch/issues/new?template=feature-request.yml " + "so that we can prioritize its implementation." + ) + cond = get_any_has_fw_grad_cond(derivative=None) + return ( + FW_DERIVATIVE_FORBID_TEMPLATE.substitute(cond=cond, name=name, msg=msg) + if cond != "" + else "" + ) + + body: List[str] = [] + unpack_args_stats, unpacked_bindings = unpack_args(f) + + body.extend(unpack_args_stats) + if requires_derivative: + body.extend(emit_any_requires_grad()) + body.extend(emit_any_has_forward_grad()) + body.extend(emit_check_inplace()) + body.extend(emit_original_self_definition()) + body.extend(setup_derivative(differentiable_inputs)) + body.append(declare_returned_variables(f)) + + body.append(emit_call(f, unpacked_bindings, try_jit_decomposition)) + if requires_derivative: + # set_flags has to appear after version_counter, because rebase_history + # requires that the counter is incremented before it is called + body.append(emit_history()) + body.extend(emit_check_if_in_complex_autograd_allowlist()) + + if is_out_fn: + body.append(emit_forbid_fw_derivatives(is_out_fn=True)) + else: + if requires_derivative and not try_jit_decomposition: + if len(fw_derivatives) > 0: + body.extend(emit_fw_derivatives()) + else: + body.append(emit_forbid_fw_derivatives()) + + if requires_derivative: + # Save only after the forward AD has been set up + body.append(emit_save_outputs()) + + if str(f.func.name.name) in RESET_GRAD_ACCUMULATOR: + # `inplace` implies that there is exactly one output named `self`, + # so we can keep the generated code easy. If you need to + # `reset_grad_accumulator` in an operator that's not `inplace`, you can + # remove this assert but the code generation will get more elaborate + assert inplace + body.append("reset_grad_accumulator(self);") + if not returns_void: + body.append(f"return {get_return_value(f)};") + return body diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/load_derivatives.py b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/load_derivatives.py new file mode 100644 index 0000000000000000000000000000000000000000..6b336cd6888b256a1a150feac2cfa39bb0b86412 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/load_derivatives.py @@ -0,0 +1,1011 @@ +# Parses derivatives.yaml into autograd functions +# +# Each autograd function is represented by `DifferentiabilityInfo` containing +# a list of `Derivative`. See `torchgen.api.autograd` for the data models. 
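# ---------------------------------------------------------------------------
# Editor's aside (illustrative sketch only).  The condition built here is an
# ||-join of per-input checks; for plain tensor inputs the docstring above shows
# the emitted form `isFwGradDefined(<name>)`, so the joined result is roughly:
def sketch_any_has_fw_grad(required_inputs):
    checks = [f"isFwGradDefined({name})" for name in required_inputs]
    return f"({' || '.join(checks)})"

print(sketch_any_has_fw_grad(["grad_output", "output"]))
# (isFwGradDefined(grad_output) || isFwGradDefined(output))
# ---------------------------------------------------------------------------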
+import re +from collections import defaultdict +from typing import Any, Counter, Dict, List, Match, Optional, Sequence, Set, Tuple + +import yaml +from torchgen.api import cpp + +from torchgen.api.autograd import ( + Derivative, + DifferentiabilityInfo, + ForwardDerivative, + SavedAttribute, +) +from torchgen.api.types import ( + BaseCType, + Binding, + boolT, + CppSignatureGroup, + layoutT, + longT, + NamedCType, + OptionalCType, + scalarTypeT, + SpecialArgName, + stringT, + symIntArrayRefT, + SymIntT, + tensorGeometryT, + tensorOptionsT, + typeAndSizeT, + VectorCType, +) +from torchgen.context import with_native_function +from torchgen.gen import get_grouped_by_view_native_functions, parse_native_yaml +from torchgen.model import ( + AUTOGRAD_KEYS, + FunctionSchema, + NativeFunction, + NativeFunctionsViewGroup, + OperatorName, + SchemaKind, + Type, + Variant, +) +from torchgen.utils import concatMap, IDENT_REGEX, split_name_params +from torchgen.yaml_utils import YamlLoader + +_GLOBAL_LOAD_DERIVATIVE_CACHE = {} + +_VALID_AUTOGRAD_KEYS = set(AUTOGRAD_KEYS) + + +# This function directly adds per-dispatchkey derivative entries for {view}_copy variants of each view op. +# Since every {view} and {view}_copy op shares the same derivative formula, +# we generate them here instead of duplicating them in the yaml. +# See Note [Codegen'd {view}_copy Operators] +def add_view_copy_derivatives( + infos: Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]], + view_groups: List[NativeFunctionsViewGroup], +) -> None: + # Get the map from each view op's name to its corresponding view group + view_name_to_group: Dict[OperatorName, NativeFunctionsViewGroup] = { + g.view.func.name: g for g in view_groups + } + + view_infos = {} + + for info_dispatch_dict in infos.values(): + # maybe_view_group only needs to be calculated once per info_dispatch_dict + maybe_view_group = None + view_copy_differentiability_infos = {} + for dispatch_key, info in info_dispatch_dict.items(): + maybe_view_group = view_name_to_group.get(info.func.func.name, None) + if maybe_view_group is not None and maybe_view_group.view_copy is not None: + view_copy_info = info.create_view_copy_from_view_derivative( + maybe_view_group + ) + if view_copy_info is not None: + fn_schema = view_copy_info.func.func + view_copy_differentiability_infos[dispatch_key] = view_copy_info + else: + break + if len(view_copy_differentiability_infos) > 0: + assert fn_schema is not None + view_infos[fn_schema] = view_copy_differentiability_infos + + infos.update(view_infos) + + +def load_derivatives( + derivatives_yaml_path: str, native_yaml_path: str, tags_yaml_path: str +) -> Tuple[Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]], Set[str]]: + # Do some caching as this is a deterministic function + global _GLOBAL_LOAD_DERIVATIVE_CACHE + key = (derivatives_yaml_path, native_yaml_path) + if key not in _GLOBAL_LOAD_DERIVATIVE_CACHE: + with open(derivatives_yaml_path) as f: + definitions = yaml.load(f, Loader=YamlLoader) + + funcs = parse_native_yaml(native_yaml_path, tags_yaml_path).native_functions + # From the parsed native functions, separate out the (generated) view_copy functions, + # so we can generate derivatives for them separately. + native_functions_with_view_groups = get_grouped_by_view_native_functions(funcs) + native_functions_without_view_copies = concatMap( + # We need to pull out the view_inplace ops too, since they might have their own derivative entries. 
+ lambda g: [g] + if isinstance(g, NativeFunction) + else list(g.functions(include_copy=False)), + native_functions_with_view_groups, + ) + view_groups = [ + g + for g in native_functions_with_view_groups + if isinstance(g, NativeFunctionsViewGroup) + ] + + # What's the difference between function schema v.s. signature? + # function schema is the complete declaration including mutability annotation / default value and etc. + # signature is the canonical schema for a group of functions (in-place/out/functional variants) + # that are semantically related. + functions_by_signature: Dict[ + FunctionSchema, List[NativeFunction] + ] = defaultdict(list) + functions_by_schema: Dict[str, NativeFunction] = {} + for function in native_functions_without_view_copies: + functions_by_signature[function.func.signature()].append(function) + assert str(function.func) not in functions_by_schema + functions_by_schema[str(function.func)] = function + + # Keep track of how many of which ops we've seen so we can + # disambiguate them with a numeric suffix. + op_counter = Counter[str]() + + # infos is a dict that maps FunctionSchema -> a dict of per dispatch key DifferentiabilityInfos + # this is useful because in tools/autograd/gen_autograd.py:match_differentiability_info + # we ultimately need to categorize the DifferentiabilityInfos by FunctionSchema + infos: Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]] = {} + used_dispatch_keys: Set[str] = set() + for defn_dict in definitions: + # Ensure that the old derivatives.yaml schema with no dispatch key can be loaded. + if "dispatch" not in defn_dict: + specification = defn_dict.pop("name") + output_differentiability = defn_dict.pop( + "output_differentiability", None + ) + defn_dict = {"name": specification, "dispatch": {"Default": defn_dict}} + if output_differentiability: + defn_dict["output_differentiability"] = output_differentiability + name, per_dispatch_diffinfos = create_differentiability_info( + defn_dict, + functions_by_signature, + functions_by_schema, + op_counter, + used_dispatch_keys, + ) + infos[name] = per_dispatch_diffinfos + + add_view_copy_derivatives(infos, view_groups) + + # cache both loaded infos as well a a set of all the dispatch_keys/aliases + # that appear in derivatives.yaml. used_dispatch_keys is useful for generating + # VariableType.cpp where we need a TORCH_LIBRARY_IMPL for every autograd dispatch key used + _GLOBAL_LOAD_DERIVATIVE_CACHE[key] = infos, used_dispatch_keys + + return _GLOBAL_LOAD_DERIVATIVE_CACHE[key] + + +# TODO: Why is this going through CppSignatureGroup, that doesn't make sense... 
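# ---------------------------------------------------------------------------
# Editor's aside (illustrative sketch only).  Entries written in the old
# derivatives.yaml style (no `dispatch:` key) are normalised into the new
# per-dispatch-key layout before further processing, as done above:
def normalize_defn(defn_dict):
    if "dispatch" not in defn_dict:
        name = defn_dict.pop("name")
        output_diff = defn_dict.pop("output_differentiability", None)
        defn_dict = {"name": name, "dispatch": {"Default": defn_dict}}
        if output_diff:
            defn_dict["output_differentiability"] = output_diff
    return defn_dict

print(normalize_defn({"name": "mul.Tensor(Tensor self, Tensor other) -> Tensor",
                      "self": "grad * other", "other": "grad * self"}))
# {'name': 'mul.Tensor(...) -> Tensor', 'dispatch': {'Default': {'self': ..., 'other': ...}}}
# ---------------------------------------------------------------------------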
+@with_native_function +def cpp_arguments(f: NativeFunction) -> Sequence[Binding]: + sigs = CppSignatureGroup.from_native_function(f, method=False) + if sigs.symint_signature is not None: + return sigs.symint_signature.arguments() + else: + return sigs.signature.arguments() + + +def create_derivative( + f: NativeFunction, + formula: str, + var_names: Tuple[str, ...], + available_named_gradients: Sequence[str], +) -> Derivative: + original_formula = formula + arguments: List[NamedCType] = [ + a.nctype.remove_const_ref() for a in cpp_arguments(f) + ] + + return_names = tuple(n if n != "self" else "result" for n in cpp.return_names(f)) + return_types = tuple( + cpp.return_type(r, symint=True).remove_const_ref() for r in f.func.returns + ) + + named_returns = [ + NamedCType(name, type) for name, type in zip(return_names, return_types) + ] + + formula, saved_inputs = saved_variables(formula, arguments, var_names) + formula, saved_outputs = saved_variables(formula, named_returns, var_names) + + used_named_gradients = { + name + for name in available_named_gradients + if re.search(IDENT_REGEX.format(name), formula) + } + + # Check that the referenced derivatives in the formula are in bounds + for i in used_gradient_indices(formula): + if i >= len(f.func.returns): + raise RuntimeError( + f"Out of bounds grads access: derivative formula for {cpp.name(f.func)} " + f"used grads[{i}], but the forward only returns {len(f.func.returns)} outputs." + ) + + return Derivative( + formula=formula, + original_formula=original_formula, + var_names=var_names, + saved_inputs=saved_inputs, + saved_outputs=saved_outputs, + named_gradients=used_named_gradients, + ) + + +def create_forward_derivative( + f: NativeFunction, formula: str, names: Tuple[str, ...] +) -> ForwardDerivative: + var_names = names + var_types: Optional[Tuple[Type, ...]] = None + for r in f.func.returns: + if r.name in var_names: + if var_types is None: + var_types = tuple() + var_types = var_types + (r.type,) + + # Handle default return names + if var_types is None: + if var_names == ("result",): + assert len(f.func.returns) == 1 + var_types = (f.func.returns[0].type,) + else: + for var_name in var_names: + res = re.findall(r"^result(\d+)$", var_name) + if len(res) == 1: + if var_types is None: + var_types = tuple() + arg_idx = int(res[0]) + var_types = var_types + (f.func.returns[arg_idx].type,) + + assert var_types is not None, "No matching output for forward derivative definition" + return ForwardDerivative( + formula=formula, + var_names=var_names, + var_types=var_types, + required_inputs_fw_grad=None, + required_inputs_primal=None, + required_original_self_value=False, + is_reusing_outplace_formula=False, + ) + + +def postprocess_forward_derivatives( + f: NativeFunction, + defn_name: str, + all_arg_names: List[str], + derivatives: List[Derivative], + forward_derivatives: List[ForwardDerivative], + args_with_derivatives: Sequence[Binding], +) -> List[ForwardDerivative]: + def find_required_inputs(formula: str, postfix: str) -> Tuple[str, ...]: + is_foreach = f.func.name.name.base.startswith("_foreach_") + required_inputs = set() + for arg in args_with_derivatives: + if ( + arg.type in ("at::TensorList", "const at::ITensorListRef &") + and not is_foreach + ): + # The functions taking TensorList handle everything internally + continue + arg_name = arg.name + + found = re.search(IDENT_REGEX.format(arg_name), formula) + if found: + raise RuntimeError( + f"The forward formula for {defn_name} is using the base name of the {arg_name} " + f"argument 
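# ---------------------------------------------------------------------------
# Editor's aside (illustrative sketch only).  Default output names in a forward
# derivative definition are either `result` (single return) or `result<N>`,
# where N indexes into the function's returns; the regex above recovers N:
import re

def return_index(var_name):
    m = re.findall(r"^result(\d+)$", var_name)
    return int(m[0]) if len(m) == 1 else None

print(return_index("result1"))   # 1
print(return_index("result"))    # None (handled by the single-return branch)
# ---------------------------------------------------------------------------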
which is ambiguous. You should use {arg_name}_p to access the primal " + f"value and {arg_name}_t to access the tangent." + ) + + found = re.search(IDENT_REGEX.format(arg_name + postfix), formula) + if found: + required_inputs.add(arg_name) + + return tuple(required_inputs) + + updated_derivatives: List[ForwardDerivative] = [] + + for defn in forward_derivatives: + formula = defn.formula + required_inputs_tangent = find_required_inputs(formula, "_t") + if formula == "auto_element_wise": + assert ( + f.func.kind() != SchemaKind.inplace + ), f"Cannot use auto_element_wise with {f.func.name} because it is an in-place variant" + if ( + (not len(args_with_derivatives) == 1) + or len(forward_derivatives) > 1 + or len(forward_derivatives[0].var_names) > 1 + ): + raise RuntimeError( + f"Derivative definition of {defn_name} in derivatives.yaml defines the " + "forward definition of gradient as element_wise but this only " + "works for functions with a single differentiable input and a " + "single differentiable output." + ) + if not len(derivatives) == 1: + raise RuntimeError( + f"Derivative definition of {defn_name} in derivatives.yaml defines the " + "forward definition of gradient as element_wise but it does not " + "defines the gradient formula for its argument which is required." + ) + # This transformation is based on the observation that for element-wise functions, the Jacobian + # matrix is diagonal and thus doing J * v is the same as (v^T J)^T (in practice, we ignore the transpositions) + # For the complex case, we use hermitian transpose and get (v.conj() J).conj() + # So here we are going to re-use the backward formula and replace two things: + # 1) all occurrences of "grad" with "foo_t.conj()", where foo is the name of the unique differentiable input. + # 2) all usage of an original input "foo" with its primal value "foo_p". + # 3) conjugate the final result + # For example, for abs, the backward formula is: + # grad * self.sgn() + # And this function generates a forward formula that is: + # (self_t.conj() * self_p.sgn()).conj() + + backward_formula = derivatives[0].original_formula + input_name = args_with_derivatives[0].name + + # Do replacement 1) of the grad + def repl(m: Any) -> str: + return f"{m.group(1)}{input_name}_t.conj(){m.group(2)}" + + fw_formula = re.sub(IDENT_REGEX.format("grad"), repl, backward_formula) + + # Do replacement 2) of the input variables + for arg in args_with_derivatives: + arg_name = arg.name + + def repl(m: Any) -> str: + return f"{m.group(1)}{arg_name}_p{m.group(2)}" + + fw_formula = re.sub(IDENT_REGEX.format(arg_name), repl, fw_formula) + + # Do the final conjugate 3) + fw_formula = f"({fw_formula}).conj()" + + # Since there is a single differentiable inputs and we necessarily need its tangent we can + # simply require all differentiable input's tangent. + required_inputs_tangent = tuple(all_arg_names) + formula = fw_formula + elif formula == "auto_linear": + if ( + len(forward_derivatives) > 1 + or len(forward_derivatives[0].var_names) > 1 + ): + raise RuntimeError( + f"Derivative definition of {defn_name} in derivatives.yaml defines the " + "forward definition of gradient as linear but this only works " + "for functions with a single differentiable output." + ) + # This transformation is based on the observation that linear functions can be written as: + # y = f(x) = A * x + # For some matrix A and the Jacobian of the function f is also A. + # So doing J * v = A * v = f(v). 
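# ---------------------------------------------------------------------------
# Editor's aside (illustrative sketch only).  Re-running the auto_element_wise
# rewrite on the `abs` example from the comment above.  IDENT_REGEX is assumed
# here to be a two-group boundary/identifier/boundary pattern, matching how the
# repl callbacks use m.group(1) and m.group(2); torchgen defines its own in
# torchgen.utils.
import re

IDENT_REGEX = r"(^|\W){}($|\W)"   # assumed shape
backward = "grad * self.sgn()"
input_name = "self"

# 1) grad -> <input>_t.conj()
fw = re.sub(IDENT_REGEX.format("grad"),
            lambda m: f"{m.group(1)}{input_name}_t.conj(){m.group(2)}", backward)
# 2) every original input -> its primal <input>_p
fw = re.sub(IDENT_REGEX.format(input_name),
            lambda m: f"{m.group(1)}{input_name}_p{m.group(2)}", fw)
# 3) conjugate the whole thing
fw = f"({fw}).conj()"

print(fw)   # (self_t.conj() * self_p.sgn()).conj()
# ---------------------------------------------------------------------------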
+ # Hence to do the jvp, we simply need to evaluate the function at the point v instead of x. + # We do this by calling the forward again by replacing any occurrence of the differentiable + # input "foo" by it's tangent "foo_t". + # Note that multiple inputs are not a problem as long as the function is truly linear wrt to + # the vector where all the differentiable inputs are stacked. + + diff_arg_names = [arg.name for arg in args_with_derivatives] + assert len(diff_arg_names) > 0 + + # Do replacement of input variables + new_args = [] + for arg_name in all_arg_names: + if arg_name in diff_arg_names: + arg_name = arg_name + "_t" + new_args.append(arg_name) + + # TODO we are trolling + if f.func.has_symint(): + defn_name += "_symint" + + # Call into the forward again. We need two cases here to handle both Tensor methods and at:: functions. + if Variant.function in f.variants: + fw_formula = f"at::{defn_name}({', '.join(new_args)})" + else: + assert Variant.method in f.variants + fw_formula = f"{new_args[0]}.{defn_name}({', '.join(new_args[1:])})" + + # All of the input tangents are always used so all of them are required here. + required_inputs_tangent = tuple(diff_arg_names) + formula = fw_formula + + # At this point, the formula is final and is not modified anymore. + + # During forward formula, we use the primal instead of the input Tensors. + # This call inspects the formula to find for which input's primal are used. + required_inputs_primal = find_required_inputs(formula, "_p") + + updated_derivatives.append( + ForwardDerivative( + formula=formula, + var_names=defn.var_names, + var_types=defn.var_types, + required_inputs_fw_grad=required_inputs_tangent, + required_inputs_primal=required_inputs_primal, + required_original_self_value=False, + is_reusing_outplace_formula=False, + ) + ) + + return updated_derivatives + + +def is_forward_derivative_definition( + all_arg_names: List[str], names: Tuple[str, ...] +) -> bool: + for name in names: + if name not in all_arg_names: + return True + else: + return False + raise RuntimeError("Expected `names` to be non-empty") + + +def create_differentiability_info( + defn_dict: Dict[Any, Any], + functions_by_signature: Dict[FunctionSchema, List[NativeFunction]], + functions_by_schema: Dict[str, NativeFunction], + op_counter: Counter[str], + used_dispatch_keys: Set[str], +) -> Tuple[FunctionSchema, Dict[str, DifferentiabilityInfo]]: + """Processes a single entry `defn` in derivatives.yaml""" + + def canonical_function( + functions: Sequence[NativeFunction], name: str + ) -> NativeFunction: + for f in functions: + if ( + not f.func.is_functional_fn() + and not f.func.is_out_fn() + and name == str(f.func.name.name) + ): + return f + # some functions only have in-place variants + assert name + "_" == cpp.name(functions[0].func) + return functions[0] + + def split_names(raw_names: str) -> Tuple[str, ...]: + """Given "foo, bar", return ["foo", "bar"].""" + return tuple(x.strip() for x in raw_names.split(",")) + + def check_grad_usage(defn_name: str, derivatives: Sequence[Derivative]) -> None: + """ + Check for some subtle mistakes one might make when writing derivatives. + These mistakes will compile, but will be latent until a function is + used with double backwards. 
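# ---------------------------------------------------------------------------
# Editor's aside (illustrative sketch only).  auto_linear simply re-invokes the
# forward with tangents substituted for the differentiable inputs, e.g. for a
# function-variant op like stack:
def sketch_auto_linear(defn_name, all_arg_names, diff_arg_names, is_method=False):
    new_args = [a + "_t" if a in diff_arg_names else a for a in all_arg_names]
    if not is_method:
        return f"at::{defn_name}({', '.join(new_args)})"
    return f"{new_args[0]}.{defn_name}({', '.join(new_args[1:])})"

print(sketch_auto_linear("stack", ["tensors", "dim"], {"tensors"}))
# at::stack(tensors_t, dim)
# ---------------------------------------------------------------------------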
+ """ + + uses_grad = False # true if any derivative uses "grad" + num_grads_uses = 0 # count of uses of "grads" or "grads[INDEX]" + uses_named_grads = False # true if any derivative uses "grad_{name}" + used_grads_indices: List[int] = [] # which indices of grads are used + for d in derivatives: + formula = d.formula + uses_grad = uses_grad or bool( + re.findall(IDENT_REGEX.format("grad"), formula) + ) + num_grads_uses += len(re.findall(IDENT_REGEX.format("grads"), formula)) + uses_named_grads = uses_named_grads or bool(d.named_gradients) + used_grads_indices.extend(used_gradient_indices(formula)) + # This is a basic sanity check: the number of places we see + # "grads" should be no fewer than the number of indices we see + # inside "grads". They may not be equal because we may use + # "grads" without an index. + assert num_grads_uses >= len(used_grads_indices) + # Thus if the number is equal, every use of grads is also + # indexed. + only_used_grads_indices = num_grads_uses == len(used_grads_indices) + + if uses_grad and num_grads_uses > 0: + raise RuntimeError( + f"Derivative definition of {defn_name} in derivatives.yaml illegally " + "mixes use of 'grad' and 'grads'. Consider replacing " + "occurrences of 'grad' with 'grads[0]'" + ) + + if only_used_grads_indices and set(used_grads_indices) == {0}: + raise RuntimeError( + f"Derivative definition of {defn_name} in derivatives.yaml solely " + "refers to 'grads[0]'. If the first output is indeed the " + "only differentiable output, replace 'grads[0]' with 'grad'; " + "otherwise, there is a likely error in your derivatives " + "declaration." + ) + + if uses_named_grads and (uses_grad or num_grads_uses > 0): + raise RuntimeError( + f"Derivative definition of {defn_name} in derivatives.yaml illegally " + 'mixes use of "grad_RETURN_NAME" and "grad" or "grads[x]". Use ' + "only one method for identifying gradients." + ) + + @with_native_function + def set_up_derivatives( + f: NativeFunction, + ) -> Tuple[ + Sequence[Derivative], + Sequence[ForwardDerivative], + Sequence[Binding], + Sequence[str], + Sequence[str], + ]: + # Set up the derivative information + derivatives: List[Derivative] = [] + forward_derivatives: List[ForwardDerivative] = [] + non_differentiable_arg_names: List[str] = [] + args_with_derivatives_set: Set[str] = set() + + all_arg_names = [a.name for a in cpp_arguments(f)] + all_ret_names = [ + r.name for r in f.func.returns + ] # only used for the assert below + # output_differentiability is captured from the enclosed + # scope. Don't modify it. + # + # If it is not present, then no output is explicitly + # undifferentiable. + # + # It may be present and shorter than the length of return + # values. If that's the case, any return value that does not + # have a corresponding entry is considered not differentiable. + differentiability = output_differentiability or [True] * len(f.func.returns) + # A return is available as a named gradient ... 
+ available_named_gradients = [ + f"grad_{ret.name}" + for ret, differentiable in zip(f.func.returns, differentiability) + # if it has not been explicitly made undifferentiable + if differentiable + # and if it has a name + and ret.name is not None + # and if its type is differentiable + and ret.type.is_tensor_like() + ] + + for raw_names in sorted(defn.keys()): + formula = defn[raw_names] + names = split_names(raw_names) + + for name in names: + assert not (name in all_arg_names and name in all_ret_names), ( + f"While processing the derivative formula for '{f.func.name}' wrt '{name}', " + f"expected '{name}' to not be both an input arg and named return. " + ) + + if is_forward_derivative_definition(all_arg_names, names): + forward_derivatives.append(create_forward_derivative(f, formula, names)) + else: + if formula.lower().strip() == "non_differentiable": + non_differentiable_arg_names += names + else: + derivative = create_derivative( + f, formula, names, available_named_gradients + ) + derivatives.append(derivative) + args_with_derivatives_set |= set(names) + + overlap = args_with_derivatives_set.intersection(non_differentiable_arg_names) + if overlap: + raise RuntimeError( + f"derivatives definition for {defn} have overlapped non_differentiable " + f"and differentiable variables: {overlap}" + ) + + # Next, let us determine the list of inputs in order. + # TODO: do we need eagerly calculate and save it here? Can it be derived + # from NativeFunction and `derivatives` on callsites instead? + args_with_derivatives = [ + a for a in cpp_arguments(f) if a.name in args_with_derivatives_set + ] + + # Postprocess forward derivatives definitions now that we know the differentiable arguments + forward_derivatives = postprocess_forward_derivatives( + f, + defn_name, + all_arg_names, + derivatives, + forward_derivatives, + args_with_derivatives, + ) + + # Test to see if the use of 'grads' makes sense. + check_grad_usage(defn_name, derivatives) + + return ( + derivatives, + forward_derivatives, + args_with_derivatives, + non_differentiable_arg_names, + available_named_gradients, + ) + + # NB: Removes 'name' from defn dictionary + specification = defn_dict.pop("name") + defn_name, _ = split_name_params(specification) + # NB: Removes 'output_differentiability' from defn dictionary + # `None` means all differentiable. + output_differentiability = defn_dict.pop("output_differentiability", None) + output_differentiability_conditions = None + if output_differentiability and any( + isinstance(diff, str) for diff in output_differentiability + ): + if len(output_differentiability) != 1: + raise RuntimeError( + f"Not supported: for {specification}," + f"output_differentiability must either be " + f"List[bool] or a List[str] where each str is a " + f"condition. In the case where it is a condition, " + f"we only support single-output functions. " + f"Please file us an issue. " + ) + output_differentiability_conditions = output_differentiability + output_differentiability = [True] + + schema_function = functions_by_schema.get(specification) + if not schema_function: + avail = "\n".join( + k for k, v in functions_by_schema.items() if cpp.name(v.func) == defn_name + ) + raise RuntimeError( + f"could not find ATen function for schema: {specification} " + f". Available signatures:\n{avail}" + ) + + # now map this to the legacy schema; this isn't technically necessary, but we'd need some logic here + # to map in-place schemas to the out-of-place variants. 
+ # TODO: maybe the logic to handle the legacy schema is no longer necessary? + signature = schema_function.func.signature() + functions = functions_by_signature[signature] + if len(functions) == 0: + avail = "\n".join( + str(k) + for k, v in functions_by_signature.items() + if cpp.name(k) == defn_name + ) + raise RuntimeError( + f"could not find ATen function for legacy signature: {signature} " + f"corresponding to schema {specification}. Please report a bug to PyTorch. " + f"Available signatures:\n{avail}" + ) + + canonical = canonical_function(functions, defn_name) + if "grad_input_mask" in (a.name for a in cpp_arguments(canonical)): + raise RuntimeError( + f"Schema for {defn_name} has an argument named grad_input_mask, " + "but this name would be shadowed by our codegen. " + "Please use a different name in native_functions.yaml." + ) + + if "result" in (a.name for a in cpp_arguments(canonical)): + raise RuntimeError( + f"Schema for {defn_name} has an argument named result, " + "but this is only allowed for outputs." + "Please use a different name in native_functions.yaml." + ) + + diffinfo_dict = {} + for key, defn in defn_dict["dispatch"].items(): + if key != "Default" and key not in _VALID_AUTOGRAD_KEYS: + raise RuntimeError( + f"Invalid dispatch key {key} in derivatives.yaml for {specification}," + f" expected key to be one of {_VALID_AUTOGRAD_KEYS}" + ) + if key not in used_dispatch_keys: + used_dispatch_keys.add(key) + + ( + derivatives, + forward_derivatives, + args_with_derivatives, + non_differentiable_arg_names, + available_named_gradients, + ) = set_up_derivatives(canonical) + + used_named_gradients: Set[str] = set() + for d in derivatives: + used_named_gradients |= d.named_gradients + + # only assign an op name if we are actually going to calculate a derivative + op = None + if args_with_derivatives: + op_prefix = _create_op_prefix(defn_name) + if key != "Default": + op_prefix = op_prefix + key + op = f"{op_prefix}{op_counter[op_prefix]}" + op_counter[op_prefix] += 1 + + diffinfo_dict[key] = DifferentiabilityInfo( + name=defn_name, + func=canonical, + op=op, + derivatives=derivatives, + forward_derivatives=forward_derivatives, + all_saved_inputs=dedup_vars( + [v for d in derivatives for v in d.saved_inputs] + ), + all_saved_outputs=dedup_vars( + [v for d in derivatives for v in d.saved_outputs] + ), + available_named_gradients=available_named_gradients, + used_named_gradients=used_named_gradients, + args_with_derivatives=args_with_derivatives, + non_differentiable_arg_names=non_differentiable_arg_names, + output_differentiability=output_differentiability, + output_differentiability_conditions=output_differentiability_conditions, + ) + + return canonical.func, diffinfo_dict + + +GRAD_INDEX_REGEX = r"(?:^|\W)grads\[(\d+)\]" + + +def used_gradient_indices(formula: str) -> List[int]: + """Determine a list of gradient indices (the i in grads[i]) that + are used by the formula. + + >>> used_gradient_indices("foo(grads[0], grads[1])") + [0, 1] + """ + return [int(i) for i in re.findall(GRAD_INDEX_REGEX, formula)] + + +def saved_variables( + formula: str, + nctypes: List[NamedCType], + var_names: Tuple[str, ...], +) -> Tuple[str, Tuple[SavedAttribute, ...]]: + def stride_expr(name: str) -> str: + assert var_names == (name,), ( + 'Replacement for ".strides()" is currently only supported for single derivatives of the same tensor ' + 'that ".strides()" is being called on.' 
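# ---------------------------------------------------------------------------
# Editor's aside (illustrative sketch only).  Autograd node names are made
# unique with a per-prefix counter, and non-Default dispatch keys are folded
# into the prefix.  Assuming, hypothetically, that _create_op_prefix("add")
# yields "AddBackward":
from collections import Counter

op_counter = Counter()

def next_op(op_prefix, key="Default"):
    if key != "Default":
        op_prefix = op_prefix + key
    op = f"{op_prefix}{op_counter[op_prefix]}"
    op_counter[op_prefix] += 1
    return op

print(next_op("AddBackward"))                          # AddBackward0
print(next_op("AddBackward"))                          # AddBackward1
print(next_op("AddBackward", "AutogradNestedTensor"))  # AddBackwardAutogradNestedTensor0
# ---------------------------------------------------------------------------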
+ ) + return f'strides_or_error({name}, "{name}")' + + REPLACEMENTS: List[Tuple[str, Dict[str, Any]]] = [ + # replace self.sym_sizes() with self_sym_sizes + ( + r"{}.sym_sizes\(\)", + { + "suffix": "_sym_sizes", + "nctype": lambda name: NamedCType(name, BaseCType(symIntArrayRefT)), + }, + ), + # replace self->sym_sizes() with self_sym_sizes_opt + ( + r"{}->sym_sizes\(\)", + { + "suffix": "_sym_sizes_opt", + "nctype": lambda name: NamedCType( + name, OptionalCType(BaseCType(symIntArrayRefT)) + ), + "expr": lambda name: f"{name}.has_value() ? c10::optional({name}->sym_sizes()) : c10::nullopt", + }, + ), + # replace self.sym_blocksize() with self_sym_blocksize_opt + ( + r"{}.sym_blocksize\(\)", + { + "suffix": "_self_sym_blocksize_opt", + "nctype": lambda name: NamedCType( + name, OptionalCType(BaseCType(symIntArrayRefT)) + ), + "expr": lambda name: f"at::sparse_csr::getSymIntBlockSize({name})", + }, + ), + # replace self.options() with self_options + ( + r"{}.options\(\)", + { + "suffix": "_options", + "nctype": lambda name: NamedCType(name, BaseCType(tensorOptionsT)), + }, + ), + # replace zeros_like(self) with self_info + ( + r"zeros_like\({}\)", + { + "suffix": "_info", + "nctype": lambda name: NamedCType(name, BaseCType(typeAndSizeT)), + "expr": lambda name: name, # at save-time + "res": lambda name: name + "_info.zeros()", # at eval-time + }, + ), + # replace self.sym_size(2) with self_sym_size_2 + ( + r"{}.sym_size\((-?\w+)\)", + { + "suffix": lambda m: f"_sym_argsize_{m.groups()[0].replace('-', 'minus_')}", + "nctype": lambda name: NamedCType(name, BaseCType(SymIntT)), + }, + ), + # replace self.numel() with self_numel + ( + r"{}.numel\(\)", + { + "suffix": "_numel", + "nctype": lambda name: NamedCType(name, BaseCType(longT)), + }, + ), + # replace self.sym_numel() with self_sym_numel + ( + r"{}.sym_numel\(\)", + { + "suffix": "_sym_numel", + "nctype": lambda name: NamedCType(name, BaseCType(SymIntT)), + }, + ), + # replace to_args_sizes(self) with self_args_sizes + ( + r"to_args_sizes\({}\)", + { + "suffix": "_args_sizes", + "nctype": lambda name: NamedCType( + name, VectorCType(VectorCType(BaseCType(longT))) + ), + }, + ), + # replace to_args_sizes_symint(self) with self_args_sizes + ( + r"to_args_sizes_symint\({}\)", + { + "suffix": "_args_sizes_symint", + "nctype": lambda name: NamedCType( + name, VectorCType(VectorCType(BaseCType(SymIntT))) + ), + }, + ), + # replace to_args_scalartypes(self) with self_args_scalartypes + ( + r"to_args_scalartypes\({}\)", + { + "suffix": "_args_scalartypes", + "nctype": lambda name: NamedCType( + name, VectorCType(BaseCType(scalarTypeT)) + ), + }, + ), + # replace TensorGeometry(self) with self_geometry + ( + r"TensorGeometry\({}\)", + { + "suffix": "_geometry", + "nctype": lambda name: NamedCType(name, BaseCType(tensorGeometryT)), + }, + ), + ( + r"{}.scalar_type\(\)", + { + "suffix": "_scalar_type", + "nctype": lambda name: NamedCType(name, BaseCType(scalarTypeT)), + }, + ), + # replace self.dim() with self_dim + ( + r"{}.dim\(\)", + { + "suffix": "_dim", + "nctype": lambda name: NamedCType(name, BaseCType(longT)), + }, + ), + # replace self.sym_strides() with self_sym_strides + ( + r"{}.sym_strides\(\)", + { + "suffix": "_sym_strides", + "nctype": lambda name: NamedCType(name, BaseCType(symIntArrayRefT)), + "expr": stride_expr, + }, + ), + # replace self.layout() with self_layout + ( + r"{}.layout\(\)", + { + "suffix": "_layout", + "nctype": lambda name: NamedCType(name, BaseCType(layoutT)), + }, + ), + # replace self.is_conj() with 
self_conjugate + ( + r"{}.is_conj\(\)", + { + "suffix": "_conjugate", + "nctype": lambda name: NamedCType(name, BaseCType(boolT)), + }, + ), + ] + + # find which arguments need to be saved + saved: List[SavedAttribute] = [] + + if ".sizes()" in formula or "->sizes()" in formula: + raise RuntimeError( + ".sizes() is not supported in derivative formulas. Instead, please use the SymInt version," + + f".sym_sizes(), which returns a c10::SymIntArrayRef. formula={formula}" + ) + if re.search(r"\.size\([-]?\d+\)", formula) or re.search( + r"->size\([-]?\d+\)", formula + ): + raise RuntimeError( + ".size(int) is not supported in derivative formulas. Instead, please use the SymInt version," + + f".sym_size(int), which returns a c10::SymInt. formula={formula}" + ) + if ".strides()" in formula or "->strides()" in formula: + raise RuntimeError( + ".strides() is not supported in derivative formulas. Instead, please use the SymInt version," + + f".sym_strides(), which returns a c10::SymIntArrayRef. formula={formula}" + ) + for nctype in nctypes: + name = ( + nctype.name.name if isinstance(nctype.name, SpecialArgName) else nctype.name + ) + # First search the formula for expressions which can be evaluated + # when the autograd Function is created to avoid saving variables + for regex, info in REPLACEMENTS: + + def repl(m: Match[str]) -> str: + suffix: str = ( + info["suffix"](m) if callable(info["suffix"]) else info["suffix"] + ) + expr: str = info["expr"](name) if "expr" in info else m.group(0) + saved.append( + SavedAttribute( + nctype=info["nctype"](name + suffix), + expr=expr, + ) + ) + if "res" in info: + replacement: str = info["res"](name) + return replacement + return name + suffix + + formula = re.sub(regex.format(name), repl, formula) + + # c10::optional<std::string> types stored in Backward nodes must be + # converted to c10::optional<c10::string_view> before being passed into + # the backward function + if nctype.type == OptionalCType(BaseCType(stringT)): + formula = re.sub( + rf"\b{name}\b", + f"{name}.has_value() ? c10::optional<c10::string_view>({name}.value()) : c10::nullopt", + formula, + ) + + # Find any variables which remain in the formula and save them + if re.search(IDENT_REGEX.format(name), formula): + saved.append( + SavedAttribute( + nctype=nctype, + expr=name, + ) + ) + + return formula, tuple(saved) + + +def _create_op_prefix(name: str) -> str: + """Takes a native function name and converts it to an op prefix name. + + Note that the "name" parameter must be the native function name + without the optional variant suffix, so "add" instead of + "add.out". + + OP names correspond to classes, hence the change to title case.
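+    A trailing "forward" in the native function name is collapsed as well: a hypothetical "foo_forward" would map to "FooBackward" rather than "FooForwardBackward".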
+ + Example:: + >>> _create_op_prefix('add') + 'AddBackward' + """ + camel_case = "".join([p.title() for p in name.split("_")]) + return (camel_case + "Backward").replace("ForwardBackward", "Backward") + + +def dedup_vars(vars: Sequence[SavedAttribute]) -> Sequence[SavedAttribute]: + seen: Set[str] = set() + saved: List[SavedAttribute] = [] + for var in vars: + name = ( + var.nctype.name.name + if isinstance(var.nctype.name, SpecialArgName) + else var.nctype.name + ) + if name in seen: + continue + seen.add(name) + saved.append(var) + return saved diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/ADInplaceOrViewType.cpp b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/ADInplaceOrViewType.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0aa89c5e6d3dfd178aa0ab5efb91362d6900472c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/ADInplaceOrViewType.cpp @@ -0,0 +1,35 @@ +#define TORCH_ASSERT_ONLY_METHOD_OPERATORS +#include "torch/csrc/autograd/VariableTypeUtils.h" + +#include + +// ${generated_comment} + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +$ops_headers +#endif + +using namespace at; +using torch::autograd::CreationMeta; +using torch::autograd::as_view; +using torch::autograd::increment_version; + +namespace torch { + +namespace ADInplaceOrView { + +namespace { +${inplace_or_view_method_definitions} +} // namespace +} // namespace ADInplaceOrView + +namespace { + +TORCH_LIBRARY_IMPL(aten, ADInplaceOrView, m) { + ${inplace_or_view_wrapper_registrations}; +} + +} // namespace +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/Functions.cpp b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/Functions.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5bc089f67df74b300bc8de6568b702d48e0cb6c2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/Functions.cpp @@ -0,0 +1,20 @@ +#include "torch/csrc/autograd/FunctionsManual.h" +#include "torch/csrc/dynamo/compiled_autograd.h" + +// ${generated_comment} + +// The manual function definitions that used to be here are now in torch/csrc/autograd/FunctionsManual.cpp +// This speeds up re-compilation and allow to share these implementations so that they can be +// used for forward mode AD formulas as well. 
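+// The ${autograd_function_definitions} placeholder below is expanded by the autograd codegen into the method bodies (e.g. apply()) of each generated backward node, such as AddBackward0.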
+ +using namespace torch::autograd::generated::details; +using at::Tensor; +using at::Scalar; +using at::IntArrayRef; +using at::TensorList; + +namespace torch::autograd::generated { + +${autograd_function_definitions} + +} // namespace torch::autograd::generated diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/Functions.h b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/Functions.h new file mode 100644 index 0000000000000000000000000000000000000000..437a5e8e898895b2c1f113d19bdf34c6e34b1ea8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/Functions.h @@ -0,0 +1,51 @@ +#pragma once + +// ${generated_comment} + +#include +#include +#include + +#include "torch/csrc/autograd/function.h" +#include "torch/csrc/autograd/variable.h" +#include "torch/csrc/autograd/saved_variable.h" +#include + +#include + +namespace torch { namespace autograd { namespace generated { + +using at::Scalar; +using at::Tensor; +using at::IntArrayRef; +using at::ArrayRef; +using at::Type; +using at::TensorGeometry; +using at::ScalarType; +using c10::optional; +using c10::fmap; + +inline std::vector unpack_list(at::ArrayRef xs, std::shared_ptr saved_for = nullptr) { + // NB: we must explicitly do the conversion in the lambda, otherwise template + // deduction will give a Tensor of Variable which is not convertible + return fmap(xs, [&saved_for](const SavedVariable& x) { + // TODO(crcrpar): Use `std::move(saved_for)` to avoid incrementing refcount, which would need refactoring. + return static_cast(x.unpack(saved_for)); + }); +} + +inline c10::List> unpack_opt_list(at::ArrayRef xs, std::shared_ptr saved_for = nullptr) { + torch::List> result; + result.reserve(xs.size()); + for (const SavedVariable& v : xs) { + auto var = v.unpack(saved_for); + result.push_back(var.defined() ? c10::optional(var) : c10::nullopt); + } + return result; +} + +using torch::autograd::TypeAndSize; + +${autograd_function_declarations} + +}}} // namespace torch::autograd::generated diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/TraceType.cpp b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/TraceType.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fb5e7ae44a5353a3cc2a90858fe33b7fc0ef8bfd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/TraceType.cpp @@ -0,0 +1,40 @@ +#define TORCH_ASSERT_ONLY_METHOD_OPERATORS +#include "torch/csrc/jit/frontend/tracer.h" + +#include + +#include "torch/csrc/autograd/function.h" + +#include "ATen/quantized/Quantizer.h" + +// ${generated_comment} + +// See the `Tracer` section in `torch/csrc/jit/OVERVIEW.md`. 
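+// The ${trace_method_definitions} below wrap each op for the Tracer dispatch key: they record the call (node, inputs and outputs) into the active JIT trace and then redispatch to the underlying implementation.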
+// NOTE See [Sharded File] comment in VariableType + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +$ops_headers +#endif + +using namespace at; + +namespace torch { + +namespace TraceType { + +namespace { +${trace_method_definitions} +} // namespace +} // namespace TraceType + +namespace { + +TORCH_LIBRARY_IMPL(aten, Tracer, m) { + ${trace_wrapper_registrations}; +} + +} // namespace + +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/VariableType.cpp b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/VariableType.cpp new file mode 100644 index 0000000000000000000000000000000000000000..08f1f8b698e528ca382ead2fb64ee0a45a708b08 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/VariableType.cpp @@ -0,0 +1,65 @@ +#include "torch/csrc/autograd/VariableTypeUtils.h" +#include "torch/csrc/autograd/generated/VariableType.h" +#include "torch/csrc/autograd/FunctionsManual.h" + +#include +#include +#include +#include + +#include + + +// ${generated_comment} + +// NOTE [Sharded File]: on this file's split-into-shards state +// +// Back in the good old days, VariableType.cpp was generated as one +// file with every function in it, and everything was great and +// simple. +// +// However, this file was also very large (over 36,000 lines), and +// compiling it was very slow, and in fact was a significant +// bottleneck for incremental rebuilds. To address this, we now +// generate the file split across multiple shards, named +// VariableType_0.cpp and so on, which can be compiled in parallel. +// +// For ease of inspection and debugging, so that it's not necessary to +// go rooting around in multiple files, we also generate all the +// functions together in VariableTypeEverything.cpp. This generated +// file is only for convenience; it's not actually used in the +// build. If the file you're looking at now is one of the shards, you +// may want to switch over to the Everything variant to make you +// grepping smoother. 
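+// The ${type_derived_method_definitions} placeholder below expands to one autograd wrapper per op in this shard: each wrapper creates the backward Node, saves the values its derivative formula needs, redispatches to the underlying kernel, and attaches the Node to the output via grad_fn.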
+ +using namespace at; +using namespace torch::autograd::generated; +using namespace torch::autograd::generated::details; + + +namespace torch::autograd { + +namespace VariableType { +namespace{ + C10_UNUSED void reset_grad_accumulator(Variable & self) { + AutogradMeta* meta = torch::autograd::impl::get_autograd_meta(self); + if (meta != nullptr) { + meta->grad_accumulator_.reset(); + } + } +} + +namespace { + + +${type_derived_method_definitions} +} +} + +namespace { + +${wrapper_registrations} + +} + +} // namespace torch::autograd diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/VariableType.h b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/VariableType.h new file mode 100644 index 0000000000000000000000000000000000000000..5ff1a6dc567d3e1ac3bab707942631a72c642e2e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/VariableType.h @@ -0,0 +1,58 @@ +#pragma once + +// ${generated_comment} + +#include +#include + +#include + +#include +#include + +#include // for size_t +#include // for function +#include // for unique_ptr +#include +#include + +namespace at { + struct Quantizer; +}; + +namespace torch { namespace autograd { + +using Variable = at::Tensor; +using at::Context; +using at::Device; +using at::Dimname; +using at::DimnameList; +using at::Generator; +using at::IntArrayRef; +using at::MemoryFormat; +using at::QScheme; +using at::Scalar; +using at::ScalarType; +using at::Storage; +using at::Tensor; +using at::TensorList; +using at::TensorOptions; +using at::Quantizer; +// This is temporary typedef to enable Quantizer in aten native function API +// we'll remove them when we are actually exposing Quantizer class +// to frontend +using ConstQuantizerPtr = const c10::intrusive_ptr&; +using c10::optional; + +namespace VariableType { + TORCH_API std::vector allCUDATypes(); + TORCH_API std::vector allXPUTypes(); + TORCH_API std::vector allCPUTypes(); + + at::Tensor & unpack(Tensor & t, const char * name, int pos); + const at::Tensor & unpack(const Tensor & t, const char * name, int pos); + at::Tensor unpack_opt(const Tensor & t, const char * name, int pos); + std::vector unpack(const at::ITensorListRef& tl, const char *name, int pos); +}; + +}} // namespace torch::autograd diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/annotated_fn_args.py.in b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/annotated_fn_args.py.in new file mode 100644 index 0000000000000000000000000000000000000000..1012c008451745b8f1ed1454a864f666caf2618a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/annotated_fn_args.py.in @@ -0,0 +1,11 @@ +""" +This file is needed for generating procedural tests required for +testing __torch_function__. See tests/test_overrides.py. 
+""" + +# flake8: noqa +import torch + +annotated_args = { +${annotated_args} +} diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_enum_tag.cpp b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_enum_tag.cpp new file mode 100644 index 0000000000000000000000000000000000000000..83cfad1d7ba4d6fc3529caf78e036c5883e7bc23 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_enum_tag.cpp @@ -0,0 +1,15 @@ +#include +#include +#include +#include + +namespace py = pybind11; +namespace torch { + namespace autograd { + void initEnumTag(PyObject* module) { + auto m = py::handle(module).cast(); + py::enum_(m, "Tag") + ${enum_of_valid_tags}; + m.doc() = "An Enum that contains tags that can be assigned to an operator registered in C++."; + } +}} diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_fft_functions.cpp b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_fft_functions.cpp new file mode 100644 index 0000000000000000000000000000000000000000..34e6377b8057908cd5c0deb9ce9cb702b6e5b77b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_fft_functions.cpp @@ -0,0 +1,81 @@ +#define TORCH_ASSERT_ONLY_METHOD_OPERATORS +// ${generated_comment} + +#include "torch/csrc/Device.h" +#include "torch/csrc/DynamicTypes.h" +#include "torch/csrc/Exceptions.h" +#include "torch/csrc/autograd/python_fft_functions.h" +#include "torch/csrc/autograd/generated/python_return_types.h" +#include "torch/csrc/autograd/python_variable.h" +#include "torch/csrc/autograd/utils/wrap_outputs.h" +#include "torch/csrc/autograd/utils/python_arg_parsing.h" +#include "torch/csrc/autograd/generated/variable_factories.h" +#include "torch/csrc/utils/out_types.h" +#include "torch/csrc/utils/pycfunction_helpers.h" +#include "torch/csrc/utils/python_arg_parser.h" +#include "torch/csrc/utils/structseq.h" +#include "torch/csrc/utils/cuda_lazy_init.h" + +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +$ops_headers +#endif + +using at::Tensor; +using at::Device; +using at::Layout; +using at::Scalar; +using at::ScalarType; +using at::Backend; +using at::OptionalDeviceGuard; +using at::DeviceGuard; +using at::TensorOptions; +using at::IntArrayRef; +using at::Generator; +using at::TensorList; +using at::Dimname; +using at::DimnameList; + +using torch::utils::check_out_type_matches; +using namespace torch::autograd::utils; + +namespace torch::autograd { + +// generated forward declarations start here + +${py_forwards} + +static PyMethodDef fft_functions[] = { + ${py_method_defs} + {NULL} +}; + +static PyObject* THPFFTVariableFunctionsModule = NULL; + +void initFFTFunctions(PyObject* module) { + static struct PyModuleDef def = { + PyModuleDef_HEAD_INIT, + "torch._C._fft", + NULL, + -1, + fft_functions + }; + PyObject* fft = PyModule_Create(&def); + THPFFTVariableFunctionsModule = fft; + if (!fft) { + throw python_error(); + } + // steals a reference to fft + if (PyModule_AddObject(module, "_fft", fft) != 0) { + throw python_error(); + } +} + +// generated methods start here + +${py_methods} + +} // namespace torch::autograd diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_functions.cpp b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_functions.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..1522d6cd0f5a2a1fc0188bf9d6d0d59fe1b27d85 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_functions.cpp @@ -0,0 +1,37 @@ +#include + +// ${generated_comment} + +#include +#include + +#include +#include "torch/csrc/autograd/generated/Functions.h" +#include "torch/csrc/autograd/python_cpp_function.h" +#include +#include +#include +#include +#include + +// NOTE: See [Sharded File] comment in VariableType + +namespace torch::autograd::generated { + +template +static void addClass(PyObject* module, PyTypeObject& type, const char* name, + PyGetSetDef* function_properties=NULL, PyMethodDef* function_methods=NULL) +{ + _initFunctionPyTypeObject(type, name, function_properties, function_methods); + Py_INCREF(&type); + PyModule_AddObject(module, name, (PyObject*)&type); + registerCppFunction(typeid(C), &type); +} + +${py_function_props_and_getters} + +void initialize_autogenerated_functions${shard_id}(PyObject* module) { + ${py_function_initializers} +} + +} // namespace torch::autograd::generated diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_functions.h b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..22e37207e219431100fefaf21b02e3ed0f63d956 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_functions.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +// ${generated_comment} + +// Python bindings for automatically generated autograd functions + +namespace torch { namespace autograd { namespace generated { + +${shard_forward_declare} + +inline void initialize_autogenerated_functions(PyObject* module) { + ${shard_call} +} + +}}} // namespace torch::autograd::generated diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_linalg_functions.cpp b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_linalg_functions.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c93752a3ddbfcf111426f98c3ea68fc625e94def --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_linalg_functions.cpp @@ -0,0 +1,68 @@ +#define TORCH_ASSERT_ONLY_METHOD_OPERATORS +// ${generated_comment} + +#include "torch/csrc/Device.h" +#include "torch/csrc/DynamicTypes.h" +#include "torch/csrc/Exceptions.h" +#include "torch/csrc/autograd/python_linalg_functions.h" +#include "torch/csrc/autograd/generated/python_return_types.h" +#include "torch/csrc/autograd/python_variable.h" +#include "torch/csrc/autograd/utils/wrap_outputs.h" +#include "torch/csrc/autograd/utils/python_arg_parsing.h" +#include "torch/csrc/utils/pycfunction_helpers.h" +#include "torch/csrc/utils/python_arg_parser.h" +#include "torch/csrc/utils/structseq.h" + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +$ops_headers +#endif + +using at::Tensor; +using at::Scalar; +using at::ScalarType; +using at::MemoryFormat; +using at::Generator; +using at::IntArrayRef; +using at::TensorList; + +using namespace torch::autograd::utils; + +namespace torch::autograd { + +// generated forward declarations start here + +${py_forwards} + +static PyMethodDef linalg_functions[] = { + ${py_method_defs} + {NULL} +}; + +static PyObject* THPLinalgVariableFunctionsModule = NULL; + +void initLinalgFunctions(PyObject* module) { + static 
struct PyModuleDef def = { + PyModuleDef_HEAD_INIT, + "torch._C._linalg", + NULL, + -1, + linalg_functions + }; + PyObject* linalg = PyModule_Create(&def); + THPLinalgVariableFunctionsModule = linalg; + if (!linalg) { + throw python_error(); + } + // steals a reference to linalg + if (PyModule_AddObject(module, "_linalg", linalg) != 0) { + throw python_error(); + } +} + +// generated methods start here + +${py_methods} + +} // namespace torch::autograd diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_nested_functions.cpp b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_nested_functions.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3b8e1e581ed454911d2560299ef7c8fab57438d2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_nested_functions.cpp @@ -0,0 +1,81 @@ +#define TORCH_ASSERT_ONLY_METHOD_OPERATORS +// ${generated_comment} + +#include "torch/csrc/Device.h" +#include "torch/csrc/DynamicTypes.h" +#include "torch/csrc/Exceptions.h" +#include "torch/csrc/autograd/python_nested_functions.h" +#include "torch/csrc/autograd/generated/python_return_types.h" +#include "torch/csrc/autograd/python_variable.h" +#include "torch/csrc/autograd/utils/wrap_outputs.h" +#include "torch/csrc/autograd/utils/python_arg_parsing.h" +#include "torch/csrc/autograd/generated/variable_factories.h" +#include "torch/csrc/utils/out_types.h" +#include "torch/csrc/utils/pycfunction_helpers.h" +#include "torch/csrc/utils/python_arg_parser.h" +#include "torch/csrc/utils/structseq.h" +#include "torch/csrc/utils/cuda_lazy_init.h" + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +$ops_headers +#endif + +using at::Tensor; +using at::Device; +using at::Layout; +using at::Scalar; +using at::ScalarType; +using at::Backend; +using at::OptionalDeviceGuard; +using at::DeviceGuard; +using at::TensorOptions; +using at::IntArrayRef; +using at::OptionalIntArrayRef; +using at::Generator; +using at::TensorList; +using at::Dimname; +using at::DimnameList; + +using namespace torch::autograd::utils; + +namespace torch::autograd { + +// generated forward declarations start here + +${py_forwards} + +static PyMethodDef nested_functions[] = { + {NULL, NULL, 0, NULL}, + ${py_method_defs} + {NULL} +}; + +static PyObject* THPNestedVariableFunctionsModule = NULL; + +void initNestedFunctions(PyObject* module) { + nested_functions[0] = get_nested_functions_manual()[0]; + static struct PyModuleDef def = { + PyModuleDef_HEAD_INIT, + "torch._C._nested", + NULL, + -1, + nested_functions + }; + PyObject* nested = PyModule_Create(&def); + THPNestedVariableFunctionsModule = nested; + if (!nested) { + throw python_error(); + } + // steals a reference to nested + if (PyModule_AddObject(module, "_nested", nested) != 0) { + throw python_error(); + } +} + +// generated methods start here + +${py_methods} + +} // namespace torch::autograd diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_nn_functions.cpp b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_nn_functions.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f311cfebe4c5f18c8b741cb03e8f7aaac05f3b0c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_nn_functions.cpp @@ -0,0 +1,113 @@ +#define TORCH_ASSERT_ONLY_METHOD_OPERATORS +// ${generated_comment} + +#include 
"torch/csrc/Device.h" +#include "torch/csrc/DynamicTypes.h" +#include "torch/csrc/Exceptions.h" +#include "torch/csrc/autograd/python_nn_functions.h" +#include "torch/csrc/autograd/generated/python_return_types.h" +#include "torch/csrc/autograd/python_variable.h" +#include "torch/csrc/autograd/utils/wrap_outputs.h" +#include "torch/csrc/autograd/utils/python_arg_parsing.h" +#include "torch/csrc/utils/pycfunction_helpers.h" +#include "torch/csrc/utils/python_arg_parser.h" +#include "torch/csrc/utils/structseq.h" +#include "torch/csrc/utils/tensor_memoryformats.h" + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +$ops_headers +#endif + +using at::Tensor; +using at::Scalar; +using at::MemoryFormat; +using at::Generator; +using at::IntArrayRef; +using at::ArrayRef; + +using namespace torch::autograd::utils; + +namespace torch::autograd { + +static PyObject* THPNNVariableFunctionsModule = NULL; + +static PyObject * THPVariable__parse_to(PyObject* module, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "to(Device device=None, ScalarType dtype=None, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)", + "to(ScalarType dtype, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)", + "to(Tensor tensor, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)", + }); + ParsedArgs<5> parsed_args; + auto r = parser.parse(args, kwargs, parsed_args); + if (r.has_torch_function()) { + return handle_torch_function(r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn", "_parse_to"); + } + auto parsed = parse_to_conversion(r, /*allow_copy*/ false); // we don't want copy for nn.Module.to + auto& device = std::get<0>(parsed); + auto& scalarType = std::get<1>(parsed); + auto non_blocking = std::get<2>(parsed); + auto opt_memory_format = std::get<4>(parsed); + auto tuple = THPObjectPtr{PyTuple_New(4)}; + if (!tuple) throw python_error(); + if (device) { + PyTuple_SET_ITEM(tuple.get(), 0, THPDevice_New(*device)); + } else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(tuple.get(), 0, Py_None); + } + if (scalarType) { + PyTuple_SET_ITEM(tuple.get(), 1, torch::autograd::utils::wrap(torch::getTHPDtype(*scalarType))); + } else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(tuple.get(), 1, Py_None); + } + PyTuple_SET_ITEM(tuple.get(), 2, torch::autograd::utils::wrap(non_blocking)); + if (opt_memory_format.has_value()) { + PyTuple_SET_ITEM(tuple.get(), 3, torch::utils::getTHPMemoryFormat(opt_memory_format.value())); + } else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(tuple.get(), 3, Py_None); + } + return tuple.release(); + END_HANDLE_TH_ERRORS +} + +// generated forward declarations start here + +${py_forwards} + +static PyMethodDef nn_functions[] = { + {"_parse_to", castPyCFunctionWithKeywords(THPVariable__parse_to), + METH_VARARGS | METH_KEYWORDS, nullptr}, + ${py_method_defs} + {NULL} +}; + +void initNNFunctions(PyObject* module) { + static struct PyModuleDef def = { + PyModuleDef_HEAD_INIT, + "torch._C._nn", + NULL, + -1, + nn_functions + }; + PyObject* nn = PyModule_Create(&def); + THPNNVariableFunctionsModule = nn; + if (!nn) { + throw python_error(); + } + // steals a reference to nn + if (PyModule_AddObject(module, "_nn", nn) != 0) { + throw python_error(); + } +} + +// generated methods start here + +${py_methods} + +} // namespace torch::autograd diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_return_types.cpp 
b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_return_types.cpp new file mode 100644 index 0000000000000000000000000000000000000000..139e6b8958336cfcc8328fa33581e9f1ab6d5532 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_return_types.cpp @@ -0,0 +1,52 @@ +#include + +#include +#include +#include + +#include "torch/csrc/autograd/generated/python_return_types.h" +#include "torch/csrc/utils/structseq.h" +#include "torch/csrc/Exceptions.h" + +namespace torch { namespace autograd { namespace generated { + +${py_return_types} + +}}} + +namespace torch::autograd { + +static void addReturnType( + PyObject* module, + const char* name, + PyTypeObject* type) { + // hold onto the TypeObject for the unlikely case of user + // deleting or overriding it. + Py_INCREF(type); + if (PyModule_AddObject( + module, + name, + (PyObject*)type) != 0) { + Py_DECREF(type); + throw python_error(); + } +} + +void initReturnTypes(PyObject* module) { + static struct PyModuleDef def = { + PyModuleDef_HEAD_INIT, "torch._C._return_types", nullptr, -1, {}}; + PyObject* return_types_module = PyModule_Create(&def); + if (!return_types_module) { + throw python_error(); + } + + ${py_return_types_registrations} + + // steals a reference to return_types on success + if (PyModule_AddObject(module, "_return_types", return_types_module) != 0) { + Py_DECREF(return_types_module); + throw python_error(); + } +} + +} // namespace torch::autograd diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_return_types.h b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_return_types.h new file mode 100644 index 0000000000000000000000000000000000000000..ce6c355ea146a272709255b898603764112168b9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_return_types.h @@ -0,0 +1,14 @@ +#pragma once + +namespace torch { +namespace autograd { +namespace generated { + +${py_return_types_declarations} + +} + +void initReturnTypes(PyObject* module); + +} // namespace autograd +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_sparse_functions.cpp b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_sparse_functions.cpp new file mode 100644 index 0000000000000000000000000000000000000000..648d91442102e9b950cb2ddb8db545c4b4e1100e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_sparse_functions.cpp @@ -0,0 +1,67 @@ +#define TORCH_ASSERT_ONLY_METHOD_OPERATORS +// ${generated_comment} + +#include "torch/csrc/Device.h" +#include "torch/csrc/DynamicTypes.h" +#include "torch/csrc/Exceptions.h" +#include "torch/csrc/autograd/python_sparse_functions.h" +#include "torch/csrc/autograd/python_variable.h" +#include "torch/csrc/autograd/utils/wrap_outputs.h" +#include "torch/csrc/autograd/utils/python_arg_parsing.h" +#include "torch/csrc/utils/pycfunction_helpers.h" +#include "torch/csrc/utils/python_arg_parser.h" +#include "torch/csrc/utils/structseq.h" + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +$ops_headers +#endif + +using at::Tensor; +using at::Scalar; +using at::ScalarType; +using at::MemoryFormat; +using at::Generator; +using at::IntArrayRef; +using at::TensorList; + +using namespace torch::autograd::utils; + +namespace torch::autograd { + +// generated forward declarations 
start here + +${py_forwards} + +static PyMethodDef sparse_functions[] = { + ${py_method_defs} + {NULL} +}; + +static PyObject* THPSparseVariableFunctionsModule = NULL; + +void initSparseFunctions(PyObject* module) { + static struct PyModuleDef def = { + PyModuleDef_HEAD_INIT, + "torch._C._sparse", + NULL, + -1, + sparse_functions + }; + PyObject* sparse = PyModule_Create(&def); + THPSparseVariableFunctionsModule = sparse; + if (!sparse) { + throw python_error(); + } + // steals a reference to sparse + if (PyModule_AddObject(module, "_sparse", sparse) != 0) { + throw python_error(); + } +} + +// generated methods start here + +${py_methods} + +} // namespace torch::autograd diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_special_functions.cpp b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_special_functions.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4cbcb790151c02b906d4da4a8eef47aa65f841e7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_special_functions.cpp @@ -0,0 +1,79 @@ +#define TORCH_ASSERT_ONLY_METHOD_OPERATORS +// ${generated_comment} + +#include "torch/csrc/Device.h" +#include "torch/csrc/DynamicTypes.h" +#include "torch/csrc/Exceptions.h" +#include "torch/csrc/autograd/python_special_functions.h" +#include "torch/csrc/autograd/generated/python_return_types.h" +#include "torch/csrc/autograd/python_variable.h" +#include "torch/csrc/autograd/utils/wrap_outputs.h" +#include "torch/csrc/autograd/utils/python_arg_parsing.h" +#include "torch/csrc/autograd/generated/variable_factories.h" +#include "torch/csrc/utils/out_types.h" +#include "torch/csrc/utils/pycfunction_helpers.h" +#include "torch/csrc/utils/python_arg_parser.h" +#include "torch/csrc/utils/structseq.h" +#include "torch/csrc/utils/cuda_lazy_init.h" + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +$ops_headers +#endif + +using at::Tensor; +using at::Device; +using at::Layout; +using at::Scalar; +using at::ScalarType; +using at::Backend; +using at::OptionalDeviceGuard; +using at::DeviceGuard; +using at::TensorOptions; +using at::IntArrayRef; +using at::Generator; +using at::TensorList; +using at::Dimname; +using at::DimnameList; + +using torch::utils::check_out_type_matches; +using namespace torch::autograd::utils; + +namespace torch::autograd { + +// generated forward declarations start here + +${py_forwards} + +static PyMethodDef special_functions[] = { + ${py_method_defs} + {NULL} +}; + +static PyObject* THPSpecialVariableFunctionsModule = NULL; + +void initSpecialFunctions(PyObject* module) { + static struct PyModuleDef def = { + PyModuleDef_HEAD_INIT, + "torch._C._special", + NULL, + -1, + special_functions + }; + PyObject* special = PyModule_Create(&def); + THPSpecialVariableFunctionsModule = special; + if (!special) { + throw python_error(); + } + // steals a reference to special + if (PyModule_AddObject(module, "_special", special) != 0) { + throw python_error(); + } +} + +// generated methods start here + +${py_methods} + +} // namespace torch::autograd diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_torch_functions.cpp b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_torch_functions.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1dbbfc18fe6e6344f514cdc442cc14a84cf4fb68 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_torch_functions.cpp @@ -0,0 +1,93 @@ +#define TORCH_ASSERT_ONLY_METHOD_OPERATORS +// ${generated_comment} + +// Python bindings for torch.* functions implemented through ATen. +// +// The functions are bound as static methods on a class +// torch._C._VariableFunctions which is also aliased as Variable._torch +// and also copied into 'torch' module. + +#include + +// Undefine the copysign macro so that at::copysign works as intended with MSVC +// https://github.com/python/cpython/blob/c60394c7fc9cc09b16e9675a3eeb5844b6d8523f/PC/pyconfig.h#L196 +#ifdef _MSC_VER +#undef copysign +#endif // _MSC_VER + +#include "torch/csrc/autograd/python_torch_functions.h" +#include "torch/csrc/autograd/python_variable.h" +#include "torch/csrc/autograd/utils/wrap_outputs.h" +#include "torch/csrc/Dtype.h" +#include "torch/csrc/DynamicTypes.h" +#include "torch/csrc/Exceptions.h" +#include "torch/csrc/utils/out_types.h" +#include "torch/csrc/utils/pybind.h" +#include "torch/csrc/utils/pycfunction_helpers.h" +#include "torch/csrc/utils/python_arg_parser.h" +#include "torch/csrc/utils/tensor_layouts.h" +#include "torch/csrc/utils/tensor_new.h" +#include "torch/csrc/utils/tensor_numpy.h" +#include "torch/csrc/jit/frontend/tracer.h" +#include "torch/csrc/autograd/generated/variable_factories.h" +#include "torch/csrc/utils/structseq.h" +#include "torch/csrc/utils/cuda_lazy_init.h" +#include "torch/csrc/autograd/generated/python_return_types.h" + +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +$ops_headers +#endif + +#include +#include +#include +#include + +using at::Tensor; +using at::Device; +using at::Layout; +using at::Scalar; +using at::ScalarType; +using at::Backend; +using at::OptionalDeviceGuard; +using at::DeviceGuard; +using at::TensorOptions; +using at::IntArrayRef; +using at::Generator; +using at::TensorList; +using at::Dimname; +using at::DimnameList; +using at::ArrayRef; + +using torch::utils::check_out_type_matches; +using namespace torch::autograd::utils; + +// NOTE: See [Sharded File] comment in VariableType + +namespace torch::autograd { + +// generated forward declarations start here + +${py_forwards} + +static PyMethodDef torch_functions_shard[] = { + ${py_method_defs} +}; + +void gatherTorchFunctions${shard_id}(std::vector &torch_functions) { + constexpr size_t num_functions = sizeof(torch_functions_shard) / sizeof(torch_functions_shard[0]); + torch_functions.insert( + torch_functions.end(), + torch_functions_shard, + torch_functions_shard + num_functions); +} + +// generated methods start here + +${py_methods} + +} // namespace torch::autograd diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_variable_methods.cpp b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_variable_methods.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b9e0c43283aecaa5593459941c509cbeb7e5ecf2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/python_variable_methods.cpp @@ -0,0 +1,1278 @@ +#define TORCH_ASSERT_ONLY_METHOD_OPERATORS +// ${generated_comment} + +#include + +// Undefine the copysign macro so that at::copysign works as intended with MSVC +// https://github.com/python/cpython/blob/c60394c7fc9cc09b16e9675a3eeb5844b6d8523f/PC/pyconfig.h#L196 +#ifdef _MSC_VER +#undef copysign +#endif // _MSC_VER + +#include "torch/csrc/DynamicTypes.h" +#include 
"torch/csrc/Exceptions.h" +#include "torch/csrc/Size.h" +#include "torch/csrc/autograd/generated/VariableType.h" +#include "torch/csrc/autograd/python_variable.h" +#include "torch/csrc/autograd/utils/python_arg_parsing.h" +#include "torch/csrc/autograd/utils/error_messages.h" +#include "torch/csrc/autograd/utils/wrap_outputs.h" +#include "torch/csrc/jit/frontend/tracer.h" +#ifdef USE_CUDA +#include "torch/csrc/cuda/Event.h" +#endif +#include "torch/csrc/utils/cuda_lazy_init.h" +#include "torch/csrc/utils/object_ptr.h" +#include "torch/csrc/utils/pycfunction_helpers.h" +#include "torch/csrc/utils/python_arg_parser.h" +#include "torch/csrc/utils/python_numbers.h" +#include "torch/csrc/utils/python_strings.h" +#include "torch/csrc/utils/python_tuples.h" +#include "torch/csrc/utils/tensor_apply.h" +#include "torch/csrc/utils/tensor_list.h" +#include "torch/csrc/utils/tensor_new.h" +#include "torch/csrc/utils/tensor_numpy.h" +#include "torch/csrc/utils/tensor_types.h" +#include "torch/csrc/utils/structseq.h" +#include "torch/csrc/autograd/generated/python_return_types.h" + +#include +#include +#include "c10/util/Optional.h" +#include "c10/core/Stream.h" + +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +$ops_headers +#include +#endif + +using at::DeviceGuard; +using at::device_of; +using at::OptionalDeviceGuard; +using at::Backend; +using at::Scalar; +using at::ScalarType; +using at::Tensor; +using c10::Stream; +using namespace torch::autograd::utils; + +namespace torch::autograd { + +static PyObject * THPVariable__is_view(PyObject *self, PyObject* args) +{ + HANDLE_TH_ERRORS + if (check_has_torch_function(self)) { + return handle_torch_function(self, "_is_view", args); + } + auto& self_ = THPVariable_Unpack(self); + if (self_.is_view()) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } + END_HANDLE_TH_ERRORS +} + +// implemented on the python object bc no support for first-class functions in native_functions.yaml +// See: ATen/native/README.md for more context +static PyObject * THPVariable_apply_(PyObject* self, PyObject* arg) +{ + HANDLE_TH_ERRORS + if (check_has_torch_function(self)) { + auto args = py::make_tuple(py::handle(arg)); + return handle_torch_function(self, "apply_", args.ptr()); + } + auto& self_ = THPVariable_Unpack(self); + if (self_.requires_grad()) { + throw std::runtime_error( + "Can't call apply_() on Variable that requires grad. Use " + "var.detach().apply_() instead."); + } + return THPVariable_Wrap(torch::utils::apply_(self_, arg)); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_size(PyObject* self, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "size(int64_t? 
dim=None)", + "size(Dimname dim)", + }); + auto& self_ = THPVariable_Unpack(self); + ParsedArgs<3> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + if (r.idx == 0) { + if (!r.toInt64Optional(0).has_value()) { + return THPSize_NewFromSymSizes(self_); + } + if (jit::tracer::isTracing()) { + // will error out if a tensor has symints + return wrap(jit::tracer::getSizeOf(self_, r.toInt64(0))); + } else { + return torch::toPyObject(self_.sym_size(r.toInt64(0))); + } + } else if (r.idx == 1) { + if (jit::tracer::isTracing()) { + TORCH_INTERNAL_ASSERT(false, "NYI: Named tensors w/ JIT"); + } + return wrap(self_.size(r.dimname(0))); + } + Py_RETURN_NONE; + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_stride(PyObject* self, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "stride(int64_t? dim=None)", + "stride(Dimname dim)", + }); + auto& self_ = THPVariable_Unpack(self); + ParsedArgs<3> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + if (r.idx == 0) { + if (r.toInt64Optional(0).has_value()) { + return torch::toPyObject(self_.sym_stride(r.toInt64(0))); + } + // yes, this is called strides in ATen. + at::SymIntArrayRef strides = self_.sym_strides(); + // we can't do the normal wrapping here because IntArrayRef maps to both + // torch.Size and tuple in python + // TODO: consider factoring this out + THPObjectPtr tuple(PyTuple_New(strides.size())); + if (!tuple) throw python_error(); + for (size_t i = 0; i != strides.size(); i++) { + PyObject* s = torch::toPyObject(strides[i]); + if (!s) throw python_error(); + PyTuple_SET_ITEM(tuple.get(), i, s); + } + return tuple.release(); + } else if (r.idx == 1) { + return wrap(self_.stride(r.dimname(0))); + } + Py_RETURN_NONE; + END_HANDLE_TH_ERRORS +} + +// implemented on the python object to avoid dispatch overhead +static PyObject * THPVariable_get_device(PyObject* self_, PyObject* args) +{ + HANDLE_TH_ERRORS + if (check_has_torch_function(self_)) { + return handle_torch_function(self_, "get_device", args, nullptr); + } + auto& self = THPVariable_Unpack(self_); + return wrap(self.get_device()); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_has_names(PyObject* self_, PyObject* args) +{ + HANDLE_TH_ERRORS + if (check_has_torch_function(self_)) { + return handle_torch_function(self_, "has_names", args); + } + auto& self = THPVariable_Unpack(self_); + return wrap(self.has_names()); + END_HANDLE_TH_ERRORS +} + +// implemented on the python object to avoid dispatch overhead +static PyObject * THPVariable_data_ptr(PyObject* self_, PyObject* args) +{ + HANDLE_TH_ERRORS + if (check_has_torch_function(self_)) { + return handle_torch_function(self_, "data_ptr", args); + } + auto& self = THPVariable_Unpack(self_); + return wrap(self.data_ptr()); + END_HANDLE_TH_ERRORS +} + +// implemented on the python object to avoid dispatch overhead +static PyObject * THPVariable_storage_offset(PyObject* self_, PyObject* args) +{ + HANDLE_TH_ERRORS + if (check_has_torch_function(self_)) { + return handle_torch_function(self_, "storage_offset"); + } + auto& self = THPVariable_Unpack(self_); + return py::cast(self.sym_storage_offset()).release().ptr(); + END_HANDLE_TH_ERRORS +} + +// implemented on the python object to avoid 
dispatch overhead +static PyObject * THPVariable_dim(PyObject* self, PyObject* args) +{ + HANDLE_TH_ERRORS + if (check_has_torch_function(self)) { + return handle_torch_function(self, "dim", args); + } + auto& self_ = THPVariable_Unpack(self); + return THPUtils_packInt64(self_.dim()); + END_HANDLE_TH_ERRORS +} + +// implemented on the python object to avoid dispatch overhead +static PyObject * THPVariable_numel(PyObject* self, PyObject* args) +{ + HANDLE_TH_ERRORS + if (check_has_torch_function(self)) { + return handle_torch_function(self, "numel", args); + } + auto& self_ = THPVariable_Unpack(self); + if (jit::tracer::isTracing()) { + return wrap(jit::tracer::getNumelOf(self_)); + } else { + return py::cast(self_.sym_numel()).release().ptr(); + } + END_HANDLE_TH_ERRORS +} + +static Tensor dispatch_contiguous(const Tensor & self, at::MemoryFormat memory_format) { + pybind11::gil_scoped_release no_gil; + OptionalDeviceGuard device_guard(device_of(self)); + return self.contiguous(memory_format); +} + +static PyObject * THPVariable_contiguous(PyObject* self, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "contiguous(*, MemoryFormat memory_format=contiguous_format)", + }); + ParsedArgs<1> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + auto& self_ = THPVariable_Unpack(self); + auto memory_format = r.memoryformat(0); + // avoids touching the GIL or current device if self is already contiguous + if (self_.is_contiguous(memory_format)) { + // NOTE: this logic is duplicated from VariableType.cpp. Since we need to + // record this call to contiguous() in the trace regardless of whether + // we actually call contiguous here, we need to record this information + // manually. 
+ if (jit::tracer::isTracing()) { + auto tracer_state = jit::tracer::getTracingState(); + auto op_name = c10::Symbol::fromQualString("aten::contiguous"); + auto node = tracer_state->createNode(op_name, /*num_outputs=*/0); + jit::tracer::recordSourceLocation(node); + jit::tracer::addInputs(node, "self", self_); + jit::tracer::addInputs(node, "memory_format", memory_format); + tracer_state->insertNode(node); + jit::tracer::addOutput(node, self_); + } + Py_INCREF(self); + return self; + } + return THPVariable_Wrap(dispatch_contiguous(self_, memory_format)); + END_HANDLE_TH_ERRORS +} + +static Tensor dispatch_copy_(const Tensor & self, const Tensor & other, bool non_blocking) { + pybind11::gil_scoped_release no_gil; + OptionalDeviceGuard device_guard(device_of(self)); + return self.copy_(other, non_blocking); +} + + static PyObject * THPVariable_copy_(PyObject* self, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "copy_(Tensor other, bool non_blocking=False)", + "copy_(Tensor other, bool async=False)|deprecated" + }); + auto& self_ = THPVariable_Unpack(self); + ParsedArgs<2> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + return THPVariable_Wrap(dispatch_copy_(self_, r.tensor(0), r.toBool(1))); + END_HANDLE_TH_ERRORS +} + +template +static T dispatch_to(const Tensor & self) { + pybind11::gil_scoped_release no_gil; + OptionalDeviceGuard device_guard(device_of(self)); + if (self.sym_numel() != 1) { + throw ValueError("only one element tensors can be converted to Python scalars"); + } + return self.template item(); +} + +static PyObject * THPVariable_float_scalar(PyObject* self, PyObject* args) { + HANDLE_TH_ERRORS + if (check_has_torch_function(self)) { + return handle_torch_function(self, "__float__", args); + } + jit::tracer::warn("Converting a tensor to a Python float", jit::tracer::WARN_PYTHON_DATAFLOW); + auto& self_ = THPVariable_Unpack(self); + return wrap(dispatch_to(self_)); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_complex_scalar(PyObject* self, PyObject* args) { + HANDLE_TH_ERRORS + if (check_has_torch_function(self)) { + return handle_torch_function(self, "__complex__", args); + } + jit::tracer::warn("Converting a tensor to a Python complex", jit::tracer::WARN_PYTHON_DATAFLOW); + auto& self_ = THPVariable_Unpack(self); + return wrap(dispatch_to>(self_)); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_integral_scalar(PyObject* self, PyObject* args) { + HANDLE_TH_ERRORS + if (check_has_torch_function(self)) { + return handle_torch_function(self, "__int__", args); + } + jit::tracer::warn("Converting a tensor to a Python integer", jit::tracer::WARN_PYTHON_DATAFLOW); + auto& self_ = THPVariable_Unpack(self); + if (isFloatingType(self_.scalar_type())) { + // we can't dispatch to item here because we want to avoid ATen overflow checks; + // the python integral type (long in python2) can't overflow. + return THPUtils_packDoubleAsInt(dispatch_to(self_)); + } else { + return wrap(dispatch_to(self_)); + } + END_HANDLE_TH_ERRORS +} + +// This is the __index__ function in Python which is similar to __int__, but +// called when used as a slice. 
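+// For example, a one-element integer tensor can be used directly as a Python index, e.g. `lst[torch.tensor(1)]` or `range(10)[torch.tensor(3)]`; both route through this __index__ implementation.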
+static PyObject * THPVariable_index_scalar(PyObject* self, PyObject* args) { + HANDLE_TH_ERRORS + if (check_has_torch_function(self)) { + return handle_torch_function(self, "__index__", args); + } + auto& self_ = THPVariable_Unpack(self); + // TODO: change the condition to `self_.dim() != 0` once we expose scalars + // in PyTorch. + if (!isIntegralType(self_.scalar_type(), /*includeBool=*/true) || self_.sym_numel() != 1) { + throw TypeError("only integer tensors of a single element can be converted to an index"); + } + return wrap(dispatch_to(self_)); + END_HANDLE_TH_ERRORS +} + +static Tensor dispatch_invert(const Tensor & self) { + pybind11::gil_scoped_release no_gil; + OptionalDeviceGuard device_guard(device_of(self)); + return self.bitwise_not(); +} + +static PyObject * THPVariable_invert(PyObject* self, PyObject* args) { + HANDLE_TH_ERRORS + if (check_has_torch_function(self)) { + return handle_torch_function(self, "__invert__", args); + } + auto& self_ = THPVariable_Unpack(self); + if (!isIntegralType(self_.scalar_type(), /*includeBool=*/true)) { + throw TypeError("~ (operator.invert) is only implemented on integer and Boolean-type tensors"); + } + return THPVariable_Wrap(dispatch_invert(self_)); + END_HANDLE_TH_ERRORS +} + +static Tensor dispatch_to(const Tensor & self, Device device, bool non_blocking, bool copy, c10::optional optional_memory_format) { + pybind11::gil_scoped_release no_gil; + // NOTE: this is where we record aten::to in the graph during tracing. However, the behavior of aten::to + // is different with respect to TensorOptions fields that are not present: aten::to inherits fields that + // are missing from the self argument while the tracer assumes that they should be populated with the + // default values (eg. float for scalar type). By explicitly copying over the tensor options here we fully + // specify all tensor options and thus record the proper trace + return self.to(self.options().device(device).memory_format(optional_memory_format), non_blocking, copy); +} + +static Tensor dispatch_to(const Tensor & self, bool non_blocking, bool copy, c10::optional optional_memory_format) { + pybind11::gil_scoped_release no_gil; + return self.to(self.options().memory_format(optional_memory_format), non_blocking, copy); +} + +static Tensor dispatch_to(const Tensor & self, ScalarType dtype, bool non_blocking, bool copy, c10::optional optional_memory_format) { + pybind11::gil_scoped_release no_gil; + // TODO: Make this call the TensorOptions version, maybe? + return self.to(dtype, non_blocking, copy, optional_memory_format); +} + +static Tensor dispatch_to(const Tensor & self, Device device, ScalarType dtype, bool non_blocking, bool copy, c10::optional optional_memory_format) { + pybind11::gil_scoped_release no_gil; + // TODO: Make this call the TensorOptions version, maybe? + return self.to(device, dtype, non_blocking, copy, optional_memory_format); +} + +static PyObject * THPVariable_cpu(PyObject* self, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "cpu(*, MemoryFormat? 
memory_format=None)" + }); + auto& self_ = THPVariable_Unpack(self); + ParsedArgs<1> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + auto opt_memory_format = r.memoryformatOptional(0); + return THPVariable_Wrap(dispatch_to(self_, at::Device(at::DeviceType::CPU), false, false, opt_memory_format)); + END_HANDLE_TH_ERRORS +} + +static Tensor dispatch_nonzero(const Tensor & self) { + pybind11::gil_scoped_release no_gil; + OptionalDeviceGuard device_guard(device_of(self)); + return self.nonzero(); +} + +static std::vector dispatch_nonzero_numpy(const Tensor & self) { + pybind11::gil_scoped_release no_gil; + OptionalDeviceGuard device_guard(device_of(self)); + return self.nonzero_numpy(); +} + +static PyObject * THPVariable_nonzero(PyObject* self, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "nonzero()", + "nonzero(*, bool as_tuple)", + }); + auto& self_ = THPVariable_Unpack(self); + ParsedArgs<2> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + if (r.idx == 0 || (r.idx == 1 && !r.toBool(0))) { + return wrap(dispatch_nonzero(self_)); + } else { + return wrap(dispatch_nonzero_numpy(self_)); + } + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_cuda(PyObject* self, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "cuda(Device? device=None, bool non_blocking=False, *, MemoryFormat? memory_format=None)", + "cuda(Device? device=None, bool async=False, *, MemoryFormat? memory_format=None)|deprecated" + }); + auto& self_ = THPVariable_Unpack(self); + ParsedArgs<3> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + auto device = r.isNone(0) ? at::Device(at::DeviceType::CUDA) : r.device(0); + auto opt_memory_format = r.memoryformatOptional(2); + TORCH_CHECK(device.is_cuda(), "Invalid device, must be cuda device"); + torch::utils::cuda_lazy_init(); + return THPVariable_Wrap(dispatch_to(self_, device, r.toBool(1), false, opt_memory_format)); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_xpu(PyObject* self, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "xpu(Device? device=None, bool non_blocking=False, *, MemoryFormat? memory_format=None)", + "xpu(Device? device=None, bool async=False, *, MemoryFormat? memory_format=None)|deprecated" + }); + auto& self_ = THPVariable_Unpack(self); + ParsedArgs<3> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if (r.has_torch_function()) { + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + auto device = r.isNone(0) ? at::Device(at::DeviceType::XPU) : r.device(0); + auto opt_memory_format = r.memoryformatOptional(2); + TORCH_CHECK(device.is_xpu(), "Invalid device, must be xpu device"); + return THPVariable_Wrap(dispatch_to(self_, device, r.toBool(1), false, opt_memory_format)); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_ipu(PyObject* self, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "ipu(Device? 
device=None, bool non_blocking=False, *, MemoryFormat? memory_format=None)", + "ipu(Device? device=None, bool async=False, *, MemoryFormat? memory_format=None)|deprecated" + }); + auto& self_ = THPVariable_Unpack(self); + ParsedArgs<3> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if (r.has_torch_function()) { + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + auto device = r.isNone(0) ? at::Device(at::DeviceType::IPU) : r.device(0); + auto opt_memory_format = r.memoryformatOptional(2); + TORCH_CHECK(device.is_ipu(), "Invalid device, must be ipu device"); + return THPVariable_Wrap(dispatch_to(self_, device, r.toBool(1), false, opt_memory_format)); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_to_type(PyObject* self, ScalarType scalarType, c10::optional optional_memory_format) { + HANDLE_TH_ERRORS + auto& self_ = THPVariable_Unpack(self); + return THPVariable_Wrap(dispatch_to(self_, scalarType, false, false, optional_memory_format)); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_byte(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "byte(*, MemoryFormat? memory_format=None)" + }); + ParsedArgs<1> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + auto opt_memory_format = r.memoryformatOptional(0); + return THPVariable_to_type(self, ScalarType::Byte, opt_memory_format); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_char(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "char(*, MemoryFormat? memory_format=None)" + }); + ParsedArgs<1> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + auto opt_memory_format = r.memoryformatOptional(0); + return THPVariable_to_type(self, ScalarType::Char, opt_memory_format); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_double(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "double(*, MemoryFormat? memory_format=None)" + }); + ParsedArgs<1> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + auto opt_memory_format = r.memoryformatOptional(0); + return THPVariable_to_type(self, ScalarType::Double, opt_memory_format); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_float(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "float(*, MemoryFormat? memory_format=None)" + }); + ParsedArgs<1> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + auto opt_memory_format = r.memoryformatOptional(0); + return THPVariable_to_type(self, ScalarType::Float, opt_memory_format); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_cdouble(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "cdouble(*, MemoryFormat? 
memory_format=None)" + }); + ParsedArgs<1> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + auto opt_memory_format = r.memoryformatOptional(0); + return THPVariable_to_type(self, ScalarType::ComplexDouble, opt_memory_format); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_cfloat(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "cfloat(*, MemoryFormat? memory_format=None)" + }); + ParsedArgs<1> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + auto opt_memory_format = r.memoryformatOptional(0); + return THPVariable_to_type(self, ScalarType::ComplexFloat, opt_memory_format); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_half(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "half(*, MemoryFormat? memory_format=None)" + }); + ParsedArgs<1> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + auto opt_memory_format = r.memoryformatOptional(0); + return THPVariable_to_type(self, ScalarType::Half, opt_memory_format); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_int(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "int(*, MemoryFormat? memory_format=None)" + }); + ParsedArgs<1> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + auto opt_memory_format = r.memoryformatOptional(0); + return THPVariable_to_type(self, ScalarType::Int, opt_memory_format); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_long(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "long(*, MemoryFormat? memory_format=None)" + }); + ParsedArgs<1> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + auto opt_memory_format = r.memoryformatOptional(0); + return THPVariable_to_type(self, ScalarType::Long, opt_memory_format); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_short(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "short(*, MemoryFormat? memory_format=None)" + }); + ParsedArgs<1> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + auto opt_memory_format = r.memoryformatOptional(0); + return THPVariable_to_type(self, ScalarType::Short, opt_memory_format); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_bool(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "bool(*, MemoryFormat? 
memory_format=None)" + }); + ParsedArgs<1> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + auto opt_memory_format = r.memoryformatOptional(0); + return THPVariable_to_type(self, ScalarType::Bool, opt_memory_format); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_bfloat16(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "bfloat16(*, MemoryFormat? memory_format=None)" + }); + ParsedArgs<1> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + auto opt_memory_format = r.memoryformatOptional(0); + return THPVariable_to_type(self, ScalarType::BFloat16, opt_memory_format); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_element_size(PyObject* self, PyObject* args) +{ + HANDLE_TH_ERRORS + if (check_has_torch_function(self)) { + return handle_torch_function(self, "element_size", args); + } + auto& self_ = THPVariable_Unpack(self); + return THPUtils_packInt64(self_.element_size()); + END_HANDLE_TH_ERRORS +} + +// implemented on the python object bc PyObjects not declarable in native_functions.yaml +// See: ATen/native/README.md for more context +static PyObject * THPVariable_numpy(PyObject* self, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "numpy(*, bool force=False)" + }); + auto& self_ = THPVariable_Unpack(self); + ParsedArgs<1> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if (r.has_torch_function()) { + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + jit::tracer::warn("Converting a tensor to a NumPy array", jit::tracer::WARN_PYTHON_DATAFLOW); + return torch::utils::tensor_to_numpy(self_, r.toBool(0)); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_requires_grad_(PyObject* self, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "requires_grad_(bool requires_grad=True)", + }); + auto& self_ = THPVariable_Unpack(self); + ParsedArgs<1> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + // temporary hack to improve functorch UX. + const auto& functorch_tls = at::functorch::functorchTLSAccessor(); + if (functorch_tls) { + functorch_tls->checkSupportsInplaceRequiresGrad(); + } + + auto requires_grad = r.toBool(0); + // should we throw if requires_grad is true? var.requires_grad = True throws here + // but it's nice to let this be a no-op. + if (!self_.is_leaf() && !requires_grad) { + throw std::runtime_error(autograd::utils::requires_grad_leaf_error(requires_grad)); + } + if (requires_grad && ! 
isDifferentiableType(at::typeMetaToScalarType(self_.dtype()))) { + throw std::runtime_error("only Tensors of floating point dtype can require gradients"); + } + self_.set_requires_grad(requires_grad); + return THPVariable_Wrap(self_); + END_HANDLE_TH_ERRORS +} + +inline bool dispatch_is_contiguous(const Tensor & self, MemoryFormat memory_format) { + return self.is_contiguous(memory_format); +} + +// implemented on the python object to avoid dispatch overhead +static PyObject * THPVariable_is_contiguous(PyObject* self_, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "is_contiguous(*, MemoryFormat memory_format=contiguous_format)", + }); + ParsedArgs<1> parsed_args; + auto r = parser.parse(self_, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self_, args, kwargs, PyObject_Type(self_), "torch.Tensor"); + } + + auto memory_format = r.memoryformat(0); + auto& self = THPVariable_Unpack(self_); + return wrap(dispatch_is_contiguous(self, memory_format)); + END_HANDLE_TH_ERRORS +} + +// implemented on the python object to avoid dispatch overhead +static PyObject * THPVariable_item(PyObject* self, PyObject* args) +{ + HANDLE_TH_ERRORS + if (check_has_torch_function(self)) { + return handle_torch_function(self, "item", args); + } + jit::tracer::warn("Converting a tensor to a Python number", jit::tracer::WARN_PYTHON_DATAFLOW); + auto& self_ = THPVariable_Unpack(self); + auto dispatch_item_ = [](const Tensor& self) -> at::Scalar { + pybind11::gil_scoped_release no_gil; + return self.item(); + }; + return py::cast(dispatch_item_(self_)).release().ptr(); + END_HANDLE_TH_ERRORS +} + +// implemented on the python object bc no support for first class functions in native_functions.yaml +// See: ATen/native/README.md for more context +static PyObject * THPVariable_map_(PyObject* self, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + static PythonArgParser parser({ "map_(Tensor other, PyObject* callable)" }); + auto& self_ = THPVariable_Unpack(self); + ParsedArgs<2> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + Variable other = r.tensor(0); + if (self_.requires_grad() || other.requires_grad()) { + throw std::runtime_error( + "Can't call map_() on Variable that requires grad. 
Use " + "var.detach().map_() instead."); + } + TORCH_CHECK( + !self_.unsafeGetTensorImpl()->is_python_dispatch() && !other.unsafeGetTensorImpl()->is_python_dispatch(), + ".map_ is not supported for tensor subclasses."); + + return THPVariable_Wrap(torch::utils::map_(self_, other, r.pyobject(1))); + END_HANDLE_TH_ERRORS +} + +// implemented on the python object bc no support for first class functions in native_functions.yaml +// See: ATen/native/README.md for more context +static PyObject * THPVariable_map2_(PyObject* self, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + static PythonArgParser parser({ "map2_(Tensor x, Tensor y, PyObject* callable)" }); + auto& self_ = THPVariable_Unpack(self); + ParsedArgs<3> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + Variable x = r.tensor(0); + Variable y = r.tensor(1); + if (self_.requires_grad() || x.requires_grad() || y.requires_grad()) { + throw std::runtime_error( + "Can't call map2_() on Variable that requires grad. Use " + "var.detach().map2_() instead."); + } + TORCH_CHECK( + !x.unsafeGetTensorImpl()->is_python_dispatch() && !y.unsafeGetTensorImpl()->is_python_dispatch(), + ".map2_ is not supported for tensor subclasses."); + return THPVariable_Wrap(torch::utils::map2_(self_, x, y, r.pyobject(2))); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_new(PyObject* self, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + if (check_has_torch_function(self)) { + return handle_torch_function(self, "new", args, kwargs); + } + auto& self_ = THPVariable_Unpack(self); + OptionalDeviceGuard device_guard(device_of(self_)); + return THPVariable_Wrap(torch::utils::legacy_tensor_new(legacyExtractDispatchKey(self_), self_.scalar_type(), args, kwargs)); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_new_tensor(PyObject* self, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + if (check_has_torch_function(self)) { + return handle_torch_function(self, "new_tensor", args, kwargs); + } + auto& self_ = THPVariable_Unpack(self); + OptionalDeviceGuard device_guard(device_of(self_)); + return THPVariable_Wrap(torch::utils::new_tensor(legacyExtractDispatchKey(self_), self_.scalar_type(), args, kwargs)); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_storage(PyObject* self, PyObject* arg) +{ + HANDLE_TH_ERRORS + if (check_has_torch_function(self)) { + return handle_torch_function(self, "untyped_storage"); + } + auto& self_ = THPVariable_Unpack(self); + return createPyObject(self_.storage()); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_to(PyObject* self, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "to(Device device=None, ScalarType dtype=None, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)", + "to(ScalarType dtype, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)", + "to(Tensor tensor, bool non_blocking=False, bool copy=False, *, MemoryFormat? 
memory_format=None)", + }); + ParsedArgs<5> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + if (r.has_torch_function()) { + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + auto parsed = parse_to_conversion(r, /*allow_copy*/ true); + auto& device = std::get<0>(parsed); + auto& scalarType = std::get<1>(parsed); + auto non_blocking = std::get<2>(parsed); + auto copy = std::get<3>(parsed); + auto opt_memory_format = std::get<4>(parsed); + auto& self_ = THPVariable_Unpack(self); + if (device && device->is_cuda()) { + torch::utils::cuda_lazy_init(); + } + if (!device && !scalarType && !copy && !opt_memory_format.has_value()) { + Py_INCREF(self); + return self; + } else if (!device && !scalarType) { + return THPVariable_Wrap( + dispatch_to(self_, non_blocking, copy, opt_memory_format)); + } else if (!device) { + return THPVariable_Wrap(dispatch_to(self_, *scalarType, non_blocking, copy, opt_memory_format)); + } else if (!scalarType) { + return THPVariable_Wrap(dispatch_to(self_, *device, non_blocking, copy, opt_memory_format)); + } else { + return THPVariable_Wrap(dispatch_to(self_, *device, *scalarType, non_blocking, copy, opt_memory_format)); + } + Py_RETURN_NONE; + END_HANDLE_TH_ERRORS +} + +// implemented on the python object b/c arbitrarily nested list not declarable in native_functions.yaml +// See: ATen/native/README.md for more context +static PyObject * THPVariable_tolist(PyObject* self, PyObject* args) +{ + HANDLE_TH_ERRORS + if (check_has_torch_function(self)) { + return handle_torch_function(self, "tolist", args); + } + jit::tracer::warn("Converting a tensor to a Python list", jit::tracer::WARN_PYTHON_DATAFLOW); + auto self_ = THPVariable_Unpack(self); + return torch::utils::tensor_to_list(self_); + END_HANDLE_TH_ERRORS +} + +static PyObject * THPVariable_type(PyObject* self, PyObject* args, PyObject* kwargs) +{ + HANDLE_TH_ERRORS + static PythonArgParser parser({ + "type(PyObject* dtype=None, bool non_blocking=False, *, MemoryFormat? memory_format=None)", + "type(PyObject* dtype=None, bool async=False, *, MemoryFormat? 
memory_format=None)|deprecated" + }); + auto& self_ = THPVariable_Unpack(self); + ParsedArgs<3> parsed_args; + auto r = parser.parse(self, args, kwargs, parsed_args); + + if(r.has_torch_function()){ + return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor"); + } + + if (r.isNone(0)) { + return THPUtils_packString(torch::utils::options_to_string(self_.options())); + } + auto obj = r.pyobject(0); + auto opt_memory_format = r.memoryformatOptional(2); + std::string type_name; + bool is_dtype = false; + if (PyType_Check(obj)) { + if (obj == THPVariableClass) { + type_name = "torch.Tensor"; + } else { + type_name = ((PyTypeObject*)obj)->tp_name; + } + } else if (THPUtils_checkString(obj)) { + type_name = THPUtils_unpackString(obj); + } else if (THPDtype_Check(obj)) { + is_dtype = true; + } else { + throw TypeError("dtype must be a type, str, or dtype object"); + } + ScalarType scalar_type; + Device device = self_.device(); + if (is_dtype) { + scalar_type = r.scalartype(0); + return THPVariable_Wrap(dispatch_to(self_, scalar_type, /*non_blocking=*/ r.toBool(1), /*copy=*/ false, opt_memory_format)); + } + at::TensorOptions options = torch::utils::options_from_string(type_name); + scalar_type = at::typeMetaToScalarType(options.dtype()); + auto device_type = options.device().type(); + if (device_type != device.type()) { + device = at::Device(device_type); + } + if (device.is_cuda()) { + torch::utils::cuda_lazy_init(); + } + return THPVariable_Wrap(dispatch_to(self_, device, scalar_type, /*non_blocking=*/ r.toBool(1), /*copy=*/ false, opt_memory_format)); + END_HANDLE_TH_ERRORS +} + +// generated methods start here + +${py_methods} + +static PyObject * THPVariable_bool_scalar(PyObject* self, PyObject* args) { + if (check_has_torch_function(self)) { + HANDLE_TH_ERRORS + return handle_torch_function(self, "__bool__", args); + END_HANDLE_TH_ERRORS + } + jit::tracer::warn("Converting a tensor to a Python boolean", jit::tracer::WARN_PYTHON_DATAFLOW); + return THPVariable_is_nonzero(self, args); +} + +// Wrapper converts a raised TypeError into returning NotImplemented +// Used to implement binary arithmetic operators +template <PyObject* (*Func)(PyObject*, PyObject*, PyObject*)> +static PyObject * TypeError_to_NotImplemented_(PyObject* self, PyObject* args, PyObject* kwargs) { + + PyObject* ret = Func(self, args, kwargs); + if (!ret && PyErr_ExceptionMatches(PyExc_TypeError)) { + PyErr_Clear(); + Py_INCREF(Py_NotImplemented); + ret = Py_NotImplemented; + } + return ret; +} + +// set_ has to be defined in the template because the c10::Storage object +// does not have a type, and we need to make sure the Python storage object's +// type matches the tensor's type +static PyObject* THPVariable_set_( + PyObject* self_, + PyObject* args, + PyObject* kwargs) { + HANDLE_TH_ERRORS + const Tensor& self = THPVariable_Unpack(self_); + static PythonArgParser parser( + { + "set_()", + "set_(Storage source)", + "set_(Storage source, SymInt storage_offset, SymIntArrayRef size, SymIntArrayRef stride=None)", + "set_(Tensor source)", + "set_(Tensor source, SymInt storage_offset, SymIntArrayRef size, SymIntArrayRef stride=None)", + }, + /*traceable=*/false); + + ParsedArgs<4> parsed_args; + auto _r = parser.parse(args, kwargs, parsed_args); + + switch (_r.idx) { + case 0: { + // aten::set_(Tensor(a!) self) -> Tensor(a!) + auto dispatch_set_ = [](const Tensor& self) -> Tensor { + pybind11::gil_scoped_release no_gil; + return self.set_(); + }; + return wrap(dispatch_set_(self)); + } + case 1: { + // aten::set_.source_Storage(Tensor(a!) 
self, Storage source) -> + // Tensor(a!) + at::ScalarType storage_scalar_type; + bool is_typed_storage = true; + at::Storage storage = _r.storage(0, storage_scalar_type, is_typed_storage); + TORCH_CHECK(storage_scalar_type == self.dtype() || !is_typed_storage, + "Expected a Storage of type ", self.dtype(), + " or an UntypedStorage, but got type ", storage_scalar_type, + " for argument 1 'storage'"); + auto dispatch_set_ = [](const Tensor& self, Storage source) -> Tensor { + pybind11::gil_scoped_release no_gil; + return self.set_(source); + }; + return wrap(dispatch_set_(self, storage)); + } + case 2: { + // aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage + // source, int storage_offset, int[] size, int[] stride=[]) -> Tensor(a!) + at::ScalarType storage_scalar_type; + bool is_typed_storage = true; + at::Storage storage = _r.storage(0, storage_scalar_type, is_typed_storage); + TORCH_CHECK(storage_scalar_type == self.dtype() || !is_typed_storage, + "Expected a Storage of type ", self.dtype(), + " or an UntypedStorage, but got type ", storage_scalar_type, + " for argument 1 'storage'"); + auto dispatch_set_ = [](const Tensor& self, + Storage source, + c10::SymInt storage_offset, + c10::SymIntArrayRef size, + c10::SymIntArrayRef stride) -> Tensor { + pybind11::gil_scoped_release no_gil; + return self.set__symint(source, storage_offset, size, stride); + }; + return wrap(dispatch_set_( + self, storage, _r.toSymInt(1), _r.symintlist(2), _r.symintlist(3))); + } + case 3: { + // aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!) + auto dispatch_set_ = [](const Tensor& self, const Tensor& source) -> Tensor { + TORCH_CHECK(source.dtype() == self.dtype(), "Could not set tensor of type ", source.dtype(), " to a tensor of type ", self.dtype()); + pybind11::gil_scoped_release no_gil; + return self.set_(source); + }; + return wrap(dispatch_set_(self, _r.tensor(0))); + } + case 4: { + // aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor + // source, int storage_offset, int[] size, int[] stride=[]) -> Tensor(a!) + at::Tensor storage = _r.tensor(0); + auto dispatch_set_ = [](const Tensor& self, + const Tensor& source, + c10::SymInt storage_offset, + c10::SymIntArrayRef size, + c10::SymIntArrayRef stride) -> Tensor { + pybind11::gil_scoped_release no_gil; + return self.set__symint(source, storage_offset, size, stride); + }; + return wrap(dispatch_set_( + self, storage, _r.toSymInt(1), _r.symintlist(2), _r.symintlist(3))); + } + } + Py_RETURN_NONE; + END_HANDLE_TH_ERRORS +} + +// XXX: ops that are bound here are not exposed to the C++ api nor the JIT. 
+// Any new ops added here should be accompanied with a comment why they are not +// being registered through native_functions.yaml, and be tagged cpp / JIT +PyMethodDef variable_methods[] = { + // These magic methods are all implemented on python object to wrap NotImplementedError + {"__add__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_add>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__radd__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_add>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__iadd__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_add_>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__rmul__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_mul>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__mul__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_mul>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__imul__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_mul_>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__sub__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_sub>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__isub__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_sub_>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__div__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_div>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__truediv__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_div>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__floordiv__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_floor_divide>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__idiv__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_div_>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__ifloordiv__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_floor_divide_>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__mod__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_remainder>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__imod__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_remainder_>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__eq__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_eq>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__ne__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_ne>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__lt__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_lt>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__le__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_le>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__gt__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_gt>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__ge__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_ge>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__rand__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_bitwise_and>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__ror__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_bitwise_or>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__rxor__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_bitwise_xor>), METH_VARARGS | METH_KEYWORDS, NULL}, + {"__bool__", THPVariable_bool_scalar, METH_NOARGS, NULL}, + {"__float__", THPVariable_float_scalar, METH_NOARGS, NULL}, + {"__complex__", THPVariable_complex_scalar, METH_NOARGS, NULL}, + {"__int__", THPVariable_integral_scalar, METH_NOARGS, NULL}, + {"__long__", THPVariable_integral_scalar, METH_NOARGS, NULL}, + {"__index__", THPVariable_index_scalar, METH_NOARGS, NULL}, + {"__nonzero__", THPVariable_bool_scalar, METH_NOARGS, NULL}, + {"__invert__", THPVariable_invert, METH_NOARGS, NULL}, + {"__matmul__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_matmul>), METH_VARARGS | METH_KEYWORDS, NULL}, + 
{"_is_view", THPVariable__is_view, METH_NOARGS, NULL}, + {"apply_", THPVariable_apply_, METH_O, NULL}, + {"bfloat16", castPyCFunctionWithKeywords(THPVariable_bfloat16), METH_VARARGS | METH_KEYWORDS, NULL}, + {"byte", castPyCFunctionWithKeywords(THPVariable_byte), METH_VARARGS | METH_KEYWORDS, NULL}, + {"char", castPyCFunctionWithKeywords(THPVariable_char), METH_VARARGS | METH_KEYWORDS, NULL}, + {"contiguous", castPyCFunctionWithKeywords(THPVariable_contiguous), METH_VARARGS | METH_KEYWORDS, NULL}, + {"copy_", castPyCFunctionWithKeywords(THPVariable_copy_), METH_VARARGS | METH_KEYWORDS, NULL}, + {"cpu", castPyCFunctionWithKeywords(THPVariable_cpu), METH_VARARGS | METH_KEYWORDS, NULL}, + {"cuda", castPyCFunctionWithKeywords(THPVariable_cuda), METH_VARARGS | METH_KEYWORDS, NULL}, + {"xpu", castPyCFunctionWithKeywords(THPVariable_xpu), METH_VARARGS | METH_KEYWORDS, NULL}, + {"ipu", castPyCFunctionWithKeywords(THPVariable_ipu), METH_VARARGS | METH_KEYWORDS, NULL}, + {"data_ptr", THPVariable_data_ptr, METH_NOARGS, NULL}, + {"dim", THPVariable_dim, METH_NOARGS, NULL}, + {"has_names", THPVariable_has_names, METH_NOARGS, NULL}, + {"double", castPyCFunctionWithKeywords(THPVariable_double), METH_VARARGS | METH_KEYWORDS, NULL}, + {"cdouble", castPyCFunctionWithKeywords(THPVariable_cdouble), METH_VARARGS | METH_KEYWORDS, NULL}, + {"element_size", THPVariable_element_size, METH_NOARGS, NULL}, + {"float", castPyCFunctionWithKeywords(THPVariable_float), METH_VARARGS | METH_KEYWORDS, NULL}, + {"cfloat", castPyCFunctionWithKeywords(THPVariable_cfloat), METH_VARARGS | METH_KEYWORDS, NULL}, + {"get_device", THPVariable_get_device, METH_NOARGS, NULL}, + {"bool", castPyCFunctionWithKeywords(THPVariable_bool), METH_VARARGS | METH_KEYWORDS, NULL}, + {"half", castPyCFunctionWithKeywords(THPVariable_half), METH_VARARGS | METH_KEYWORDS, NULL}, + {"int", castPyCFunctionWithKeywords(THPVariable_int), METH_VARARGS | METH_KEYWORDS, NULL}, + {"is_contiguous", castPyCFunctionWithKeywords(THPVariable_is_contiguous), METH_VARARGS | METH_KEYWORDS, NULL}, + {"item", THPVariable_item, METH_NOARGS, NULL}, + {"long", castPyCFunctionWithKeywords(THPVariable_long), METH_VARARGS | METH_KEYWORDS, NULL}, + {"map_", castPyCFunctionWithKeywords(THPVariable_map_), METH_VARARGS | METH_KEYWORDS, NULL}, + {"map2_", castPyCFunctionWithKeywords(THPVariable_map2_), METH_VARARGS | METH_KEYWORDS, NULL}, + {"ndimension", THPVariable_dim, METH_NOARGS, NULL}, + {"nelement", THPVariable_numel, METH_NOARGS, NULL}, + {"new", castPyCFunctionWithKeywords(THPVariable_new), METH_VARARGS | METH_KEYWORDS, NULL}, + {"new_tensor", castPyCFunctionWithKeywords(THPVariable_new_tensor), METH_VARARGS | METH_KEYWORDS, NULL}, + {"nonzero", castPyCFunctionWithKeywords(THPVariable_nonzero), METH_VARARGS | METH_KEYWORDS, NULL}, + {"numel", THPVariable_numel, METH_NOARGS, NULL}, + {"numpy", castPyCFunctionWithKeywords(THPVariable_numpy), METH_VARARGS | METH_KEYWORDS, NULL}, + {"requires_grad_", castPyCFunctionWithKeywords(THPVariable_requires_grad_), METH_VARARGS | METH_KEYWORDS, NULL}, + {"set_", castPyCFunctionWithKeywords(THPVariable_set_), METH_VARARGS | METH_KEYWORDS, NULL}, + {"short", castPyCFunctionWithKeywords(THPVariable_short), METH_VARARGS | METH_KEYWORDS, NULL}, + {"size", castPyCFunctionWithKeywords(THPVariable_size), METH_VARARGS | METH_KEYWORDS, NULL}, + {"untyped_storage", THPVariable_storage, METH_NOARGS, NULL}, + {"storage_offset", THPVariable_storage_offset, METH_NOARGS, NULL}, + {"stride", castPyCFunctionWithKeywords(THPVariable_stride), 
METH_VARARGS | METH_KEYWORDS, NULL}, + {"to", castPyCFunctionWithKeywords(THPVariable_to), METH_VARARGS | METH_KEYWORDS, NULL}, + {"tolist", THPVariable_tolist, METH_NOARGS, NULL}, + {"type", castPyCFunctionWithKeywords(THPVariable_type), METH_VARARGS | METH_KEYWORDS, NULL}, + ${py_method_defs} + {NULL} +}; + +} // namespace torch::autograd diff --git a/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/variable_factories.h b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/variable_factories.h new file mode 100644 index 0000000000000000000000000000000000000000..1129d899c89752eb1e6af264fc8dfcef0c862bcd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/variable_factories.h @@ -0,0 +1,135 @@ +#pragma once + +// ${generated_comment} + +#include +#include +#include +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +$ops_headers +#endif + +#include +#include +#include + +namespace torch { + +/// NOTE: Currently `torch::tensor(...)` doesn't support mixed data types +/// (i.e. `torch::tensor({{bool, 2.0}})` doesn't work). We might be able to +/// support it in the future by iterating over all sub-lists to find +/// the largest data type that can represent all of the elements, or by using +/// variadic templates. +/// +/// NOTE: C++ `torch::tensor` with a floating-point type or an `at::ArrayRef` / `std::vector` / +/// (nested) braced-init-list of floating-point types always produces a tensor of dtype +/// `torch::get_default_dtype()`, matching Python `torch.tensor` behavior. +/// +/// NOTE: C++ `torch::tensor` with an integer type or an `at::ArrayRef` / `std::vector` / +/// (nested) braced-init-list of integer types always produces a tensor of dtype `at::kLong` +/// (aka. int64_t), matching Python `torch.tensor` behavior. +/// +/// NOTE: The following dtypes are not supported by `torch::tensor` currently: +/// - `unsigned int` +/// - `unsigned long int` +/// - `unsigned long long int` +/// - `long long int` +inline at::Tensor tensor(detail::TensorDataContainer tensor_data_container, const at::TensorOptions& options = {}) { + return autograd::make_variable( + // note: we remove the requires_grad setting from the TensorOptions because + // it is ignored anyways (and we actually have an assertion that it isn't set + // which would fail otherwise). We handle requires_grad explicitly here + // instead of passing it through to the kernel. + tensor_data_container.convert_to_tensor(options.requires_grad(c10::nullopt)), + options.requires_grad()); +} + +/// A generic deleter function. +using Deleter = std::function; +using at::MemoryFormat; + +/// Exposes the given `data` as a `Tensor` without taking ownership of the +/// original data. `sizes` should specify the shape of the tensor, `strides` the +/// stride in each dimension. The `deleter` function (a +/// `std::function`) will be called on the `data` when the Tensor +/// data would normally be deallocated. The `TensorOptions` specify additional +/// configuration options for the returned tensor, such as what type to +/// interpret the `data` as. 
+inline at::Tensor from_blob( + void* data, + at::IntArrayRef sizes, + at::IntArrayRef strides, + const Deleter& deleter, + const at::TensorOptions& options = at::TensorOptions()) { + at::Tensor tensor = ([&]() { + at::AutoDispatchBelowAutograd guard; // TODO: remove + at::tracer::impl::NoTracerDispatchMode tracer_guard; + return at::from_blob(data, sizes, strides, deleter, options.requires_grad(c10::nullopt)); + })(); + return autograd::make_variable(tensor, options.requires_grad()); +} + +/// Exposes the given `data` as a `Tensor` without taking ownership of the +/// original data. `sizes` should specify the shape of the tensor, `strides` the +/// stride in each dimension. The `TensorOptions` +/// specify additional configuration options for the returned tensor, such as +/// what type to interpret the `data` as. +inline at::Tensor from_blob( + void* data, + at::IntArrayRef sizes, + at::IntArrayRef strides, + const at::TensorOptions& options = at::TensorOptions()) { + at::Tensor tensor = ([&]() { + at::AutoDispatchBelowAutograd guard; // TODO: remove + at::tracer::impl::NoTracerDispatchMode tracer_guard; + return at::from_blob(data, sizes, strides, options.requires_grad(c10::nullopt)); + })(); + return autograd::make_variable(tensor, options.requires_grad()); +} + +/// Exposes the given `data` as a `Tensor` without taking ownership of the +/// original data. `sizes` should specify the shape of the tensor. The `deleter` +/// (a `std::function`) function will be called on the `data` when +/// the Tensor data would normally be deallocated. The `TensorOptions` specify +/// additional configuration options for the returned tensor, such as what type +/// to interpret the `data` as. +inline at::Tensor from_blob( + void* data, + at::IntArrayRef sizes, + const Deleter& deleter, + const at::TensorOptions& options = at::TensorOptions()) { + at::Tensor tensor = ([&]() { + at::AutoDispatchBelowAutograd guard; // TODO: remove + at::tracer::impl::NoTracerDispatchMode tracer_guard; + return at::from_blob(data, sizes, deleter, options.requires_grad(c10::nullopt)); + })(); + return autograd::make_variable(tensor, options.requires_grad()); +} + +/// Exposes the given `data` as a `Tensor` without taking ownership of the +/// original data. `sizes` should specify the shape of the tensor. The +/// `TensorOptions` specify additional configuration options for the returned +/// tensor, such as what type to interpret the `data` as. 
+inline at::Tensor from_blob( + void* data, + at::IntArrayRef sizes, + const at::TensorOptions& options = at::TensorOptions()) { + at::Tensor tensor = ([&]() { + at::AutoDispatchBelowAutograd guard; // TODO: remove + at::tracer::impl::NoTracerDispatchMode tracer_guard; + return at::from_blob(data, sizes, options.requires_grad(c10::nullopt)); + })(); + return autograd::make_variable(tensor, options.requires_grad()); +} + +${function_definitions} + +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/urllib3/util/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/urllib3/util/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9befa0b0e867b585b926c3247c26ce38d1b329f3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/urllib3/util/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/urllib3/util/__pycache__/connection.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/urllib3/util/__pycache__/connection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec4901cb18b4349c8b55c4f0c61c68ecc00ce662 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/urllib3/util/__pycache__/connection.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/urllib3/util/__pycache__/request.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/urllib3/util/__pycache__/request.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4489c091ff8ba5a7f4382fb51e2ac5bd4bd8d1a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/urllib3/util/__pycache__/request.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/urllib3/util/__pycache__/retry.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/urllib3/util/__pycache__/retry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7145e6fbfc4c2b087ec977cc63f4d982b6b3cff3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/urllib3/util/__pycache__/retry.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/urllib3/util/__pycache__/ssl_.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/urllib3/util/__pycache__/ssl_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfcc0e0ba6e64da575eacdf6cc0b052af659b95d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/urllib3/util/__pycache__/ssl_.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/urllib3/util/__pycache__/util.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/urllib3/util/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ceb902b555c3654fd8b9ac0babb8b939aa799d30 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/urllib3/util/__pycache__/util.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/urllib3/util/connection.py b/env-llmeval/lib/python3.10/site-packages/urllib3/util/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..5c7da73f4e0e57cfe9074c50c2628300130fc94d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/urllib3/util/connection.py @@ -0,0 +1,137 @@ +from __future__ import annotations + +import socket +import typing + +from ..exceptions import LocationParseError +from .timeout import 
_DEFAULT_TIMEOUT, _TYPE_TIMEOUT + +_TYPE_SOCKET_OPTIONS = typing.Sequence[typing.Tuple[int, int, typing.Union[int, bytes]]] + +if typing.TYPE_CHECKING: + from .._base_connection import BaseHTTPConnection + + +def is_connection_dropped(conn: BaseHTTPConnection) -> bool: # Platform-specific + """ + Returns True if the connection is dropped and should be closed. + :param conn: :class:`urllib3.connection.HTTPConnection` object. + """ + return not conn.is_connected + + +# This function is copied from socket.py in the Python 2.7 standard +# library test suite. Added to its signature is only `socket_options`. +# One additional modification is that we avoid binding to IPv6 servers +# discovered in DNS if the system doesn't have IPv6 functionality. +def create_connection( + address: tuple[str, int], + timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, + source_address: tuple[str, int] | None = None, + socket_options: _TYPE_SOCKET_OPTIONS | None = None, +) -> socket.socket: + """Connect to *address* and return the socket object. + + Convenience function. Connect to *address* (a 2-tuple ``(host, + port)``) and return the socket object. Passing the optional + *timeout* parameter will set the timeout on the socket instance + before attempting to connect. If no *timeout* is supplied, the + global default timeout setting returned by :func:`socket.getdefaulttimeout` + is used. If *source_address* is set it must be a tuple of (host, port) + for the socket to bind as a source address before making the connection. + An host of '' or port 0 tells the OS to use the default. + """ + + host, port = address + if host.startswith("["): + host = host.strip("[]") + err = None + + # Using the value from allowed_gai_family() in the context of getaddrinfo lets + # us select whether to work with IPv4 DNS records, IPv6 records, or both. + # The original create_connection function always returns all records. + family = allowed_gai_family() + + try: + host.encode("idna") + except UnicodeError: + raise LocationParseError(f"'{host}', label empty or too long") from None + + for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + sock = None + try: + sock = socket.socket(af, socktype, proto) + + # If provided, set socket level options before connecting. + _set_socket_options(sock, socket_options) + + if timeout is not _DEFAULT_TIMEOUT: + sock.settimeout(timeout) + if source_address: + sock.bind(source_address) + sock.connect(sa) + # Break explicitly a reference cycle + err = None + return sock + + except OSError as _: + err = _ + if sock is not None: + sock.close() + + if err is not None: + try: + raise err + finally: + # Break explicitly a reference cycle + err = None + else: + raise OSError("getaddrinfo returns an empty list") + + +def _set_socket_options( + sock: socket.socket, options: _TYPE_SOCKET_OPTIONS | None +) -> None: + if options is None: + return + + for opt in options: + sock.setsockopt(*opt) + + +def allowed_gai_family() -> socket.AddressFamily: + """This function is designed to work in the context of + getaddrinfo, where family=socket.AF_UNSPEC is the default and + will perform a DNS search for both IPv6 and IPv4 records.""" + + family = socket.AF_INET + if HAS_IPV6: + family = socket.AF_UNSPEC + return family + + +def _has_ipv6(host: str) -> bool: + """Returns True if the system can bind an IPv6 address.""" + sock = None + has_ipv6 = False + + if socket.has_ipv6: + # has_ipv6 returns true if cPython was compiled with IPv6 support. 
+ # It does not tell us if the system has IPv6 support enabled. To + # determine that we must bind to an IPv6 address. + # https://github.com/urllib3/urllib3/pull/611 + # https://bugs.python.org/issue658327 + try: + sock = socket.socket(socket.AF_INET6) + sock.bind((host, 0)) + has_ipv6 = True + except Exception: + pass + + if sock: + sock.close() + return has_ipv6 + + +HAS_IPV6 = _has_ipv6("::1") diff --git a/env-llmeval/lib/python3.10/site-packages/urllib3/util/ssltransport.py b/env-llmeval/lib/python3.10/site-packages/urllib3/util/ssltransport.py new file mode 100644 index 0000000000000000000000000000000000000000..fa9f2b37c5bb5c2b3fba3f138ab86a05d154608c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/urllib3/util/ssltransport.py @@ -0,0 +1,280 @@ +from __future__ import annotations + +import io +import socket +import ssl +import typing + +from ..exceptions import ProxySchemeUnsupported + +if typing.TYPE_CHECKING: + from typing import Literal + + from .ssl_ import _TYPE_PEER_CERT_RET, _TYPE_PEER_CERT_RET_DICT + + +_SelfT = typing.TypeVar("_SelfT", bound="SSLTransport") +_WriteBuffer = typing.Union[bytearray, memoryview] +_ReturnValue = typing.TypeVar("_ReturnValue") + +SSL_BLOCKSIZE = 16384 + + +class SSLTransport: + """ + The SSLTransport wraps an existing socket and establishes an SSL connection. + + Contrary to Python's implementation of SSLSocket, it allows you to chain + multiple TLS connections together. It's particularly useful if you need to + implement TLS within TLS. + + The class supports most of the socket API operations. + """ + + @staticmethod + def _validate_ssl_context_for_tls_in_tls(ssl_context: ssl.SSLContext) -> None: + """ + Raises a ProxySchemeUnsupported if the provided ssl_context can't be used + for TLS in TLS. + + The only requirement is that the ssl_context provides the 'wrap_bio' + methods. + """ + + if not hasattr(ssl_context, "wrap_bio"): + raise ProxySchemeUnsupported( + "TLS in TLS requires SSLContext.wrap_bio() which isn't " + "available on non-native SSLContext" + ) + + def __init__( + self, + socket: socket.socket, + ssl_context: ssl.SSLContext, + server_hostname: str | None = None, + suppress_ragged_eofs: bool = True, + ) -> None: + """ + Create an SSLTransport around socket using the provided ssl_context. + """ + self.incoming = ssl.MemoryBIO() + self.outgoing = ssl.MemoryBIO() + + self.suppress_ragged_eofs = suppress_ragged_eofs + self.socket = socket + + self.sslobj = ssl_context.wrap_bio( + self.incoming, self.outgoing, server_hostname=server_hostname + ) + + # Perform initial handshake. 
+ self._ssl_io_loop(self.sslobj.do_handshake) + + def __enter__(self: _SelfT) -> _SelfT: + return self + + def __exit__(self, *_: typing.Any) -> None: + self.close() + + def fileno(self) -> int: + return self.socket.fileno() + + def read(self, len: int = 1024, buffer: typing.Any | None = None) -> int | bytes: + return self._wrap_ssl_read(len, buffer) + + def recv(self, buflen: int = 1024, flags: int = 0) -> int | bytes: + if flags != 0: + raise ValueError("non-zero flags not allowed in calls to recv") + return self._wrap_ssl_read(buflen) + + def recv_into( + self, + buffer: _WriteBuffer, + nbytes: int | None = None, + flags: int = 0, + ) -> None | int | bytes: + if flags != 0: + raise ValueError("non-zero flags not allowed in calls to recv_into") + if nbytes is None: + nbytes = len(buffer) + return self.read(nbytes, buffer) + + def sendall(self, data: bytes, flags: int = 0) -> None: + if flags != 0: + raise ValueError("non-zero flags not allowed in calls to sendall") + count = 0 + with memoryview(data) as view, view.cast("B") as byte_view: + amount = len(byte_view) + while count < amount: + v = self.send(byte_view[count:]) + count += v + + def send(self, data: bytes, flags: int = 0) -> int: + if flags != 0: + raise ValueError("non-zero flags not allowed in calls to send") + return self._ssl_io_loop(self.sslobj.write, data) + + def makefile( + self, + mode: str, + buffering: int | None = None, + *, + encoding: str | None = None, + errors: str | None = None, + newline: str | None = None, + ) -> typing.BinaryIO | typing.TextIO | socket.SocketIO: + """ + Python's httpclient uses makefile and buffered io when reading HTTP + messages and we need to support it. + + This is unfortunately a copy and paste of socket.py makefile with small + changes to point to the socket directly. + """ + if not set(mode) <= {"r", "w", "b"}: + raise ValueError(f"invalid mode {mode!r} (only r, w, b allowed)") + + writing = "w" in mode + reading = "r" in mode or not writing + assert reading or writing + binary = "b" in mode + rawmode = "" + if reading: + rawmode += "r" + if writing: + rawmode += "w" + raw = socket.SocketIO(self, rawmode) # type: ignore[arg-type] + self.socket._io_refs += 1 # type: ignore[attr-defined] + if buffering is None: + buffering = -1 + if buffering < 0: + buffering = io.DEFAULT_BUFFER_SIZE + if buffering == 0: + if not binary: + raise ValueError("unbuffered streams must be binary") + return raw + buffer: typing.BinaryIO + if reading and writing: + buffer = io.BufferedRWPair(raw, raw, buffering) # type: ignore[assignment] + elif reading: + buffer = io.BufferedReader(raw, buffering) + else: + assert writing + buffer = io.BufferedWriter(raw, buffering) + if binary: + return buffer + text = io.TextIOWrapper(buffer, encoding, errors, newline) + text.mode = mode # type: ignore[misc] + return text + + def unwrap(self) -> None: + self._ssl_io_loop(self.sslobj.unwrap) + + def close(self) -> None: + self.socket.close() + + @typing.overload + def getpeercert( + self, binary_form: Literal[False] = ... + ) -> _TYPE_PEER_CERT_RET_DICT | None: + ... + + @typing.overload + def getpeercert(self, binary_form: Literal[True]) -> bytes | None: + ... 
+ + def getpeercert(self, binary_form: bool = False) -> _TYPE_PEER_CERT_RET: + return self.sslobj.getpeercert(binary_form) # type: ignore[return-value] + + def version(self) -> str | None: + return self.sslobj.version() + + def cipher(self) -> tuple[str, str, int] | None: + return self.sslobj.cipher() + + def selected_alpn_protocol(self) -> str | None: + return self.sslobj.selected_alpn_protocol() + + def selected_npn_protocol(self) -> str | None: + return self.sslobj.selected_npn_protocol() + + def shared_ciphers(self) -> list[tuple[str, str, int]] | None: + return self.sslobj.shared_ciphers() + + def compression(self) -> str | None: + return self.sslobj.compression() + + def settimeout(self, value: float | None) -> None: + self.socket.settimeout(value) + + def gettimeout(self) -> float | None: + return self.socket.gettimeout() + + def _decref_socketios(self) -> None: + self.socket._decref_socketios() # type: ignore[attr-defined] + + def _wrap_ssl_read(self, len: int, buffer: bytearray | None = None) -> int | bytes: + try: + return self._ssl_io_loop(self.sslobj.read, len, buffer) + except ssl.SSLError as e: + if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs: + return 0 # eof, return 0. + else: + raise + + # func is sslobj.do_handshake or sslobj.unwrap + @typing.overload + def _ssl_io_loop(self, func: typing.Callable[[], None]) -> None: + ... + + # func is sslobj.write, arg1 is data + @typing.overload + def _ssl_io_loop(self, func: typing.Callable[[bytes], int], arg1: bytes) -> int: + ... + + # func is sslobj.read, arg1 is len, arg2 is buffer + @typing.overload + def _ssl_io_loop( + self, + func: typing.Callable[[int, bytearray | None], bytes], + arg1: int, + arg2: bytearray | None, + ) -> bytes: + ... + + def _ssl_io_loop( + self, + func: typing.Callable[..., _ReturnValue], + arg1: None | bytes | int = None, + arg2: bytearray | None = None, + ) -> _ReturnValue: + """Performs an I/O loop between incoming/outgoing and the socket.""" + should_loop = True + ret = None + + while should_loop: + errno = None + try: + if arg1 is None and arg2 is None: + ret = func() + elif arg2 is None: + ret = func(arg1) + else: + ret = func(arg1, arg2) + except ssl.SSLError as e: + if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): + # WANT_READ, and WANT_WRITE are expected, others are not. + raise e + errno = e.errno + + buf = self.outgoing.read() + self.socket.sendall(buf) + + if errno is None: + should_loop = False + elif errno == ssl.SSL_ERROR_WANT_READ: + buf = self.socket.recv(SSL_BLOCKSIZE) + if buf: + self.incoming.write(buf) + else: + self.incoming.write_eof() + return typing.cast(_ReturnValue, ret) diff --git a/env-llmeval/lib/python3.10/site-packages/urllib3/util/timeout.py b/env-llmeval/lib/python3.10/site-packages/urllib3/util/timeout.py new file mode 100644 index 0000000000000000000000000000000000000000..4bb1be11d9cb06900dd82ecebd06aa6a7c5de916 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/urllib3/util/timeout.py @@ -0,0 +1,275 @@ +from __future__ import annotations + +import time +import typing +from enum import Enum +from socket import getdefaulttimeout + +from ..exceptions import TimeoutStateError + +if typing.TYPE_CHECKING: + from typing import Final + + +class _TYPE_DEFAULT(Enum): + # This value should never be passed to socket.settimeout() so for safety we use a -1. + # socket.settimout() raises a ValueError for negative values. 
+ token = -1 + + +_DEFAULT_TIMEOUT: Final[_TYPE_DEFAULT] = _TYPE_DEFAULT.token + +_TYPE_TIMEOUT = typing.Optional[typing.Union[float, _TYPE_DEFAULT]] + + +class Timeout: + """Timeout configuration. + + Timeouts can be defined as a default for a pool: + + .. code-block:: python + + import urllib3 + + timeout = urllib3.util.Timeout(connect=2.0, read=7.0) + + http = urllib3.PoolManager(timeout=timeout) + + resp = http.request("GET", "https://example.com/") + + print(resp.status) + + Or per-request (which overrides the default for the pool): + + .. code-block:: python + + response = http.request("GET", "https://example.com/", timeout=Timeout(10)) + + Timeouts can be disabled by setting all the parameters to ``None``: + + .. code-block:: python + + no_timeout = Timeout(connect=None, read=None) + response = http.request("GET", "https://example.com/", timeout=no_timeout) + + + :param total: + This combines the connect and read timeouts into one; the read timeout + will be set to the time leftover from the connect attempt. In the + event that both a connect timeout and a total are specified, or a read + timeout and a total are specified, the shorter timeout will be applied. + + Defaults to None. + + :type total: int, float, or None + + :param connect: + The maximum amount of time (in seconds) to wait for a connection + attempt to a server to succeed. Omitting the parameter will default the + connect timeout to the system default, probably `the global default + timeout in socket.py + `_. + None will set an infinite timeout for connection attempts. + + :type connect: int, float, or None + + :param read: + The maximum amount of time (in seconds) to wait between consecutive + read operations for a response from the server. Omitting the parameter + will default the read timeout to the system default, probably `the + global default timeout in socket.py + `_. + None will set an infinite timeout. + + :type read: int, float, or None + + .. note:: + + Many factors can affect the total amount of time for urllib3 to return + an HTTP response. + + For example, Python's DNS resolver does not obey the timeout specified + on the socket. Other factors that can affect total request time include + high CPU load, high swap, the program running at a low priority level, + or other behaviors. + + In addition, the read and total timeouts only measure the time between + read operations on the socket connecting the client and the server, + not the total amount of time for the request to return a complete + response. For most requests, the timeout is raised because the server + has not sent the first byte in the specified time. This is not always + the case; if a server streams one byte every fifteen seconds, a timeout + of 20 seconds will not trigger, even though the request will take + several minutes to complete. 
+ """ + + #: A sentinel object representing the default timeout value + DEFAULT_TIMEOUT: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT + + def __init__( + self, + total: _TYPE_TIMEOUT = None, + connect: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, + read: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, + ) -> None: + self._connect = self._validate_timeout(connect, "connect") + self._read = self._validate_timeout(read, "read") + self.total = self._validate_timeout(total, "total") + self._start_connect: float | None = None + + def __repr__(self) -> str: + return f"{type(self).__name__}(connect={self._connect!r}, read={self._read!r}, total={self.total!r})" + + # __str__ provided for backwards compatibility + __str__ = __repr__ + + @staticmethod + def resolve_default_timeout(timeout: _TYPE_TIMEOUT) -> float | None: + return getdefaulttimeout() if timeout is _DEFAULT_TIMEOUT else timeout + + @classmethod + def _validate_timeout(cls, value: _TYPE_TIMEOUT, name: str) -> _TYPE_TIMEOUT: + """Check that a timeout attribute is valid. + + :param value: The timeout value to validate + :param name: The name of the timeout attribute to validate. This is + used to specify in error messages. + :return: The validated and casted version of the given value. + :raises ValueError: If it is a numeric value less than or equal to + zero, or the type is not an integer, float, or None. + """ + if value is None or value is _DEFAULT_TIMEOUT: + return value + + if isinstance(value, bool): + raise ValueError( + "Timeout cannot be a boolean value. It must " + "be an int, float or None." + ) + try: + float(value) + except (TypeError, ValueError): + raise ValueError( + "Timeout value %s was %s, but it must be an " + "int, float or None." % (name, value) + ) from None + + try: + if value <= 0: + raise ValueError( + "Attempted to set %s timeout to %s, but the " + "timeout cannot be set to a value less " + "than or equal to 0." % (name, value) + ) + except TypeError: + raise ValueError( + "Timeout value %s was %s, but it must be an " + "int, float or None." % (name, value) + ) from None + + return value + + @classmethod + def from_float(cls, timeout: _TYPE_TIMEOUT) -> Timeout: + """Create a new Timeout from a legacy timeout value. + + The timeout value used by httplib.py sets the same timeout on the + connect(), and recv() socket requests. This creates a :class:`Timeout` + object that sets the individual timeouts to the ``timeout`` value + passed to this function. + + :param timeout: The legacy timeout value. + :type timeout: integer, float, :attr:`urllib3.util.Timeout.DEFAULT_TIMEOUT`, or None + :return: Timeout object + :rtype: :class:`Timeout` + """ + return Timeout(read=timeout, connect=timeout) + + def clone(self) -> Timeout: + """Create a copy of the timeout object + + Timeout properties are stored per-pool but each request needs a fresh + Timeout object to ensure each one has its own start/stop configured. + + :return: a copy of the timeout object + :rtype: :class:`Timeout` + """ + # We can't use copy.deepcopy because that will also create a new object + # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to + # detect the user default. + return Timeout(connect=self._connect, read=self._read, total=self.total) + + def start_connect(self) -> float: + """Start the timeout clock, used during a connect() attempt + + :raises urllib3.exceptions.TimeoutStateError: if you attempt + to start a timer that has been started already. 
+ """ + if self._start_connect is not None: + raise TimeoutStateError("Timeout timer has already been started.") + self._start_connect = time.monotonic() + return self._start_connect + + def get_connect_duration(self) -> float: + """Gets the time elapsed since the call to :meth:`start_connect`. + + :return: Elapsed time in seconds. + :rtype: float + :raises urllib3.exceptions.TimeoutStateError: if you attempt + to get duration for a timer that hasn't been started. + """ + if self._start_connect is None: + raise TimeoutStateError( + "Can't get connect duration for timer that has not started." + ) + return time.monotonic() - self._start_connect + + @property + def connect_timeout(self) -> _TYPE_TIMEOUT: + """Get the value to use when setting a connection timeout. + + This will be a positive float or integer, the value None + (never timeout), or the default system timeout. + + :return: Connect timeout. + :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None + """ + if self.total is None: + return self._connect + + if self._connect is None or self._connect is _DEFAULT_TIMEOUT: + return self.total + + return min(self._connect, self.total) # type: ignore[type-var] + + @property + def read_timeout(self) -> float | None: + """Get the value for the read timeout. + + This assumes some time has elapsed in the connection timeout and + computes the read timeout appropriately. + + If self.total is set, the read timeout is dependent on the amount of + time taken by the connect timeout. If the connection time has not been + established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be + raised. + + :return: Value to use for the read timeout. + :rtype: int, float or None + :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect` + has not yet been called on this object. + """ + if ( + self.total is not None + and self.total is not _DEFAULT_TIMEOUT + and self._read is not None + and self._read is not _DEFAULT_TIMEOUT + ): + # In case the connect timeout has not yet been established. + if self._start_connect is None: + return self._read + return max(0, min(self.total - self.get_connect_duration(), self._read)) + elif self.total is not None and self.total is not _DEFAULT_TIMEOUT: + return max(0, self.total - self.get_connect_duration()) + else: + return self.resolve_default_timeout(self._read)